2024-11-09 14:15:56,530 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-09 14:15:56,548 main DEBUG Took 0.010570 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-09 14:15:56,548 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-09 14:15:56,549 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-09 14:15:56,550 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-09 14:15:56,551 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,557 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-09 14:15:56,571 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,573 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,574 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,574 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,575 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,575 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,577 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,577 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,578 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,578 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,580 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,580 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,581 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,581 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,582 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,582 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,583 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,584 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,584 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,585 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,585 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,586 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,587 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,587 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-09 14:15:56,588 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,588 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-09 14:15:56,590 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-09 14:15:56,592 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-09 14:15:56,594 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-09 14:15:56,595 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-09 14:15:56,596 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-09 14:15:56,597 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-09 14:15:56,607 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-09 14:15:56,611 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-09 14:15:56,613 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-09 14:15:56,614 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-09 14:15:56,614 main DEBUG createAppenders(={Console})
2024-11-09 14:15:56,615 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 initialized
2024-11-09 14:15:56,615 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-09 14:15:56,616 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 OK.
2024-11-09 14:15:56,616 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-09 14:15:56,617 main DEBUG OutputStream closed
2024-11-09 14:15:56,617 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-09 14:15:56,618 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-09 14:15:56,618 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@25fb8912 OK
2024-11-09 14:15:56,708 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-09 14:15:56,711 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-09 14:15:56,712 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-09 14:15:56,713 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-09 14:15:56,714 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-09 14:15:56,714 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-09 14:15:56,715 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-09 14:15:56,715 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-09 14:15:56,716 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-09 14:15:56,716 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-09 14:15:56,717 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-09 14:15:56,717 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-09 14:15:56,717 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-09 14:15:56,718 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-09 14:15:56,718 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-09 14:15:56,718 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-09 14:15:56,719 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-09 14:15:56,719 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-09 14:15:56,722 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-09 14:15:56,722 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@64a40280) with optional ClassLoader: null
2024-11-09 14:15:56,722 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-09 14:15:56,723 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@64a40280] started OK.
2024-11-09T14:15:56,740 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.master.balancer.TestBalancerDecision timeout: 13 mins
2024-11-09 14:15:56,743 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-09 14:15:56,743 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-09T14:15:57,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-09T14:15:57,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-09T14:15:57,385 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=198, ProcessCount=11, AvailableMemoryMB=1836
2024-11-09T14:15:57,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-09T14:15:57,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-09T14:15:57,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0
2024-11-09T14:15:57,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=true, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-09T14:15:57,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-09T14:15:57,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5
2024-11-09T14:15:57,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4
2024-11-09T14:15:57,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3
2024-11-09T14:15:57,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2
2024-11-09T14:15:57,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1
2024-11-09T14:15:57,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-09T14:15:57,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9
2024-11-09T14:15:57,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8
2024-11-09T14:15:57,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7
2024-11-09T14:15:57,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6
2024-11-09T14:15:57,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1
2024-11-09T14:15:57,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1719068027=0, srv2006224723=1} racks are {rack=0}
2024-11-09T14:15:57,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-09T14:15:57,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1
2024-11-09T14:15:57,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-09T14:15:57,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-09T14:15:57,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1
2024-11-09T14:15:57,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:15:57,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-09T14:15:57,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-09T14:15:57,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1719068027=0, srv2006224723=1} racks are {rack=0}
2024-11-09T14:15:57,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-09T14:15:57,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1
2024-11-09T14:15:57,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-09T14:15:57,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-09T14:15:57,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1
2024-11-09T14:15:57,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:15:57,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:57,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1351145387=0, srv784784099=1} racks are {rack=0} 2024-11-09T14:15:57,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:57,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1351145387=0, srv784784099=1} racks are {rack=0} 2024-11-09T14:15:57,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:57,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1425303905=0, srv1708428103=1} racks are {rack=0} 2024-11-09T14:15:57,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:57,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1425303905=0, srv1708428103=1} racks are {rack=0} 2024-11-09T14:15:57,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:57,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1497490256=1, srv1274308633=0} racks are {rack=0} 2024-11-09T14:15:57,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:57,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1497490256=1, srv1274308633=0} racks are {rack=0} 2024-11-09T14:15:57,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:57,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1497490256=1, srv1274308633=0} racks are {rack=0} 2024-11-09T14:15:57,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:57,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323071234=1, srv1156349877=0} racks are {rack=0} 2024-11-09T14:15:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:57,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323071234=1, srv1156349877=0} racks are {rack=0} 2024-11-09T14:15:57,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323071234=1, srv1156349877=0} racks are {rack=0} 2024-11-09T14:15:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:57,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323071234=1, srv1156349877=0} racks are {rack=0} 2024-11-09T14:15:57,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:57,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390955499=0, srv62054452=1} racks are {rack=0} 2024-11-09T14:15:57,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:57,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv683989520=1, srv1159885814=0} racks are {rack=0} 2024-11-09T14:15:57,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:57,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:57,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:57,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:57,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:57,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:57,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:57,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:57,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv312994207=1, srv1083742289=0} racks are {rack=0} 2024-11-09T14:15:57,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-09T14:15:57,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
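The BalancerClusterState lines above ("Hosts are {...}", "server N is on host N", "server N is on rack 0") describe how the two test servers are assigned numeric host and rack indices before costing; from this point on the host set changes to srv2000461993/srv1676747766 for the remaining tables. The snippet below is only an illustrative sketch of that indexing under the single-rack assumption; the map-building code is not HBase internals.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the host/rack indexing reported by BalancerClusterState:
// each distinct server gets the next host index, and all servers share rack 0.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1676747766", "srv2000461993"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int rackIndex = 0; // the log shows a single rack named "rack"
        for (String server : servers) {
            hostIndex.putIfAbsent(server, hostIndex.size());
            int idx = hostIndex.get(server);
            System.out.println("server " + idx + " is on host " + idx
                + " and rack " + rackIndex);
        }
        System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
    }
}
```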
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-09T14:15:57,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-09T14:15:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-09T14:15:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-09T14:15:57,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-09T14:15:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-09T14:15:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-09T14:15:57,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-09T14:15:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-09T14:15:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-09T14:15:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-09T14:15:57,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-09T14:15:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-09T14:15:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-09T14:15:57,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-09T14:15:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-09T14:15:57,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-09T14:15:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-09T14:15:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-09T14:15:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-09T14:15:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-09T14:15:57,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-09T14:15:57,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-09T14:15:57,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-09T14:15:57,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-09T14:15:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-09T14:15:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-09T14:15:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-09T14:15:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-09T14:15:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-09T14:15:57,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-09T14:15:57,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-09T14:15:57,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-09T14:15:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-09T14:15:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-09T14:15:57,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-09T14:15:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-09T14:15:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-09T14:15:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-09T14:15:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-09T14:15:57,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-09T14:15:57,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-09T14:15:57,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-09T14:15:57,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-09T14:15:57,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-09T14:15:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-09T14:15:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-09T14:15:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-09T14:15:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-09T14:15:57,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-09T14:15:57,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-09T14:15:57,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-09T14:15:57,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-09T14:15:57,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-09T14:15:57,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-09T14:15:57,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-09T14:15:57,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-09T14:15:57,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-09T14:15:57,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-09T14:15:57,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-09T14:15:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-09T14:15:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-09T14:15:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-09T14:15:57,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-09T14:15:57,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-09T14:15:57,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-09T14:15:57,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-09T14:15:57,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-09T14:15:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-09T14:15:57,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-09T14:15:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-09T14:15:57,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-09T14:15:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-09T14:15:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-09T14:15:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-09T14:15:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-09T14:15:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-09T14:15:57,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-09T14:15:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-09T14:15:57,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-09T14:15:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-09T14:15:57,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-09T14:15:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-09T14:15:57,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-09T14:15:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-09T14:15:57,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-09T14:15:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-09T14:15:57,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-09T14:15:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-09T14:15:57,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-09T14:15:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-09T14:15:57,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-09T14:15:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-09T14:15:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-09T14:15:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-09T14:15:57,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-09T14:15:57,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-09T14:15:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-09T14:15:57,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-09T14:15:57,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-09T14:15:57,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-09T14:15:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-09T14:15:57,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-09T14:15:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-09T14:15:57,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-09T14:15:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-09T14:15:57,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-09T14:15:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-09T14:15:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-09T14:15:57,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-09T14:15:57,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-09T14:15:57,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-09T14:15:57,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-09T14:15:57,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-09T14:15:57,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-09T14:15:57,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-09T14:15:57,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-09T14:15:57,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-09T14:15:57,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-09T14:15:57,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-09T14:15:57,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-09T14:15:57,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-09T14:15:57,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-09T14:15:57,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-09T14:15:57,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-09T14:15:57,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-09T14:15:57,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-09T14:15:57,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-09T14:15:57,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-09T14:15:57,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-09T14:15:57,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-09T14:15:57,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-09T14:15:57,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-09T14:15:57,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-09T14:15:57,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-09T14:15:57,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-09T14:15:57,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-09T14:15:57,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-09T14:15:57,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-09T14:15:57,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-09T14:15:57,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-09T14:15:57,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-09T14:15:57,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-09T14:15:57,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-09T14:15:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-09T14:15:57,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-09T14:15:57,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-09T14:15:57,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-09T14:15:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-09T14:15:57,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-09T14:15:57,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-09T14:15:57,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-09T14:15:57,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-09T14:15:57,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-09T14:15:57,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-09T14:15:57,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-09T14:15:57,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-09T14:15:57,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-09T14:15:57,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-09T14:15:57,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-09T14:15:57,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-09T14:15:57,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-09T14:15:57,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-09T14:15:57,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-09T14:15:57,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-09T14:15:57,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-09T14:15:57,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-09T14:15:57,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-09T14:15:57,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-09T14:15:57,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-09T14:15:57,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-09T14:15:57,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-09T14:15:57,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-09T14:15:57,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-09T14:15:57,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-09T14:15:57,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-09T14:15:57,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-09T14:15:57,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-09T14:15:57,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-09T14:15:57,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-09T14:15:57,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-09T14:15:57,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-09T14:15:57,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-09T14:15:57,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-09T14:15:57,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-09T14:15:57,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-09T14:15:57,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-09T14:15:57,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-09T14:15:57,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-09T14:15:57,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-09T14:15:57,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-09T14:15:57,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-09T14:15:57,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-09T14:15:57,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-09T14:15:57,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-09T14:15:57,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-09T14:15:57,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-09T14:15:57,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-09T14:15:57,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-09T14:15:57,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-09T14:15:57,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-09T14:15:57,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-09T14:15:57,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-09T14:15:57,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-09T14:15:57,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-09T14:15:57,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:57,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:57,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-09T14:15:58,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-09T14:15:58,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-09T14:15:58,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-09T14:15:58,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-09T14:15:58,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-09T14:15:58,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-09T14:15:58,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-09T14:15:58,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-09T14:15:58,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-09T14:15:58,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-09T14:15:58,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-09T14:15:58,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-09T14:15:58,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-09T14:15:58,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-09T14:15:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-09T14:15:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-09T14:15:58,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-09T14:15:58,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-09T14:15:58,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-09T14:15:58,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-09T14:15:58,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-09T14:15:58,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-09T14:15:58,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-09T14:15:58,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-09T14:15:58,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-09T14:15:58,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-09T14:15:58,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-09T14:15:58,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-09T14:15:58,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-09T14:15:58,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-09T14:15:58,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:58,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-09T14:15:58,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-09T14:15:58,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-09T14:15:58,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-09T14:15:58,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-09T14:15:58,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-09T14:15:58,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-09T14:15:58,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-09T14:15:58,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-09T14:15:58,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-09T14:15:58,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-09T14:15:58,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:58,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:58,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-09T14:15:58,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-09T14:15:58,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-09T14:15:58,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-09T14:15:58,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-09T14:15:58,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-09T14:15:58,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-09T14:15:58,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-09T14:15:58,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:58,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:58,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:58,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-09T14:15:58,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-09T14:15:58,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:58,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-09T14:15:58,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-09T14:15:58,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-09T14:15:58,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-09T14:15:58,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-09T14:15:58,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-09T14:15:58,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-09T14:15:58,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-09T14:15:58,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-09T14:15:58,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-09T14:15:58,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-09T14:15:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2000461993=1, srv1676747766=0} racks are {rack=0} 2024-11-09T14:15:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:58,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:58,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:58,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:58,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787126926=0, srv812105690=1} racks are {rack=0} 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850922926=0, srv612919348=2, srv2040345473=1} racks are {rack=0} 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850922926=0, srv612919348=2, srv2040345473=1} racks are {rack=0} 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565068671=1, srv729919740=2, srv404889512=0} racks are {rack=0} 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565068671=1, srv729919740=2, srv404889512=0} racks are {rack=0} 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565068671=1, srv729919740=2, srv404889512=0} racks are {rack=0} 2024-11-09T14:15:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv970820368=2, srv905491898=1, srv101971249=0} racks are {rack=0} 2024-11-09T14:15:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv970820368=2, srv905491898=1, srv101971249=0} racks are {rack=0} 2024-11-09T14:15:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv908918228=2, srv751696654=1, srv1811989643=0} racks are {rack=0} 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv908918228=2, srv751696654=1, srv1811989643=0} racks are {rack=0} 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv908918228=2, srv751696654=1, srv1811989643=0} racks are {rack=0} 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv481172827=1, srv117595200=0, srv962921809=2} racks are {rack=0} 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv481172827=1, srv117595200=0, srv962921809=2} racks are {rack=0} 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv481172827=1, srv117595200=0, srv962921809=2} racks are {rack=0} 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv481172827=1, srv117595200=0, srv962921809=2} racks are {rack=0} 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:58,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:58,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv902910509=2, srv1177427450=0, srv252548501=1} racks are {rack=0} 2024-11-09T14:15:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-09T14:15:58,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv276186330=1, srv1491709482=0, srv957748518=3, srv616118064=2} racks are {rack=0} 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv276186330=1, srv1491709482=0, srv957748518=3, srv616118064=2} racks are {rack=0} 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv276186330=1, srv1491709482=0, srv957748518=3, srv616118064=2} racks are {rack=0} 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1771794476=0, srv2127084929=2, srv547002027=3, srv2125885191=1} racks are {rack=0} 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1771794476=0, srv2127084929=2, srv547002027=3, srv2125885191=1} racks are {rack=0} 2024-11-09T14:15:58,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1771794476=0, srv2127084929=2, srv547002027=3, srv2125885191=1} racks are {rack=0} 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1771794476=0, srv2127084929=2, srv547002027=3, srv2125885191=1} racks are {rack=0} 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv583365428=3, srv317482163=2, srv161534911=0, srv253339633=1} racks are {rack=0} 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv583365428=3, srv317482163=2, srv161534911=0, srv253339633=1} racks are {rack=0} 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv583365428=3, srv317482163=2, srv161534911=0, srv253339633=1} racks are {rack=0} 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv583365428=3, srv317482163=2, srv161534911=0, srv253339633=1} racks are {rack=0} 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv583365428=3, srv317482163=2, srv161534911=0, srv253339633=1} racks are {rack=0} 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757831567=0, srv656854345=2, srv49220901=1, srv846896832=3} racks are {rack=0} 2024-11-09T14:15:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757831567=0, srv656854345=2, srv49220901=1, srv846896832=3} racks are {rack=0} 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757831567=0, srv656854345=2, srv49220901=1, srv846896832=3} racks are {rack=0} 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757831567=0, srv656854345=2, srv49220901=1, srv846896832=3} racks are {rack=0} 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757831567=0, srv656854345=2, srv49220901=1, srv846896832=3} racks are {rack=0} 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757831567=0, srv656854345=2, srv49220901=1, srv846896832=3} racks are {rack=0} 2024-11-09T14:15:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108746180=0, srv280489866=1, srv287465246=2, srv563775500=3} racks are {rack=0} 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108746180=0, srv280489866=1, srv287465246=2, srv563775500=3} racks are {rack=0} 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108746180=0, srv280489866=1, srv287465246=2, srv563775500=3} racks are {rack=0} 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108746180=0, srv280489866=1, srv287465246=2, srv563775500=3} racks are {rack=0} 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108746180=0, srv280489866=1, srv287465246=2, srv563775500=3} racks are {rack=0} 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108746180=0, srv280489866=1, srv287465246=2, srv563775500=3} racks are {rack=0} 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1168332935=0, srv1478177249=1, srv565948108=2, srv874821542=3} racks are {rack=0} 2024-11-09T14:15:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1168332935=0, srv1478177249=1, srv565948108=2, srv874821542=3} racks are {rack=0} 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1168332935=0, srv1478177249=1, srv565948108=2, srv874821542=3} racks are {rack=0} 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1168332935=0, srv1478177249=1, srv565948108=2, srv874821542=3} racks are {rack=0} 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1168332935=0, srv1478177249=1, srv565948108=2, srv874821542=3} racks are {rack=0} 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1168332935=0, srv1478177249=1, srv565948108=2, srv874821542=3} racks are {rack=0} 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1280771622=0, srv1954528449=2, srv140975473=1, srv2043753746=3} racks are {rack=0} 2024-11-09T14:15:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1280771622=0, srv1954528449=2, srv140975473=1, srv2043753746=3} racks are {rack=0} 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1280771622=0, srv1954528449=2, srv140975473=1, srv2043753746=3} racks are {rack=0} 2024-11-09T14:15:58,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1280771622=0, srv1954528449=2, srv140975473=1, srv2043753746=3} racks are {rack=0} 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1280771622=0, srv1954528449=2, srv140975473=1, srv2043753746=3} racks are {rack=0} 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1280771622=0, srv1954528449=2, srv140975473=1, srv2043753746=3} racks are {rack=0} 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1496909666=0, srv496055867=3, srv1803692442=1, srv1814132421=2} racks are {rack=0} 2024-11-09T14:15:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1496909666=0, srv496055867=3, srv1803692442=1, srv1814132421=2} racks are {rack=0} 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1496909666=0, srv496055867=3, srv1803692442=1, srv1814132421=2} racks are {rack=0} 2024-11-09T14:15:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1496909666=0, srv496055867=3, srv1803692442=1, srv1814132421=2} racks are {rack=0} 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1496909666=0, srv496055867=3, srv1803692442=1, srv1814132421=2} racks are {rack=0} 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1496909666=0, srv496055867=3, srv1803692442=1, srv1814132421=2} racks are {rack=0} 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1496909666=0, srv496055867=3, srv1803692442=1, srv1814132421=2} racks are {rack=0} 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1254442172=0, srv775824361=1, srv834395454=2, srv893835111=3} racks are {rack=0} 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv28367389=1, srv720809765=2, srv844390280=3, srv1685386324=0} racks are {rack=0} 2024-11-09T14:15:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv28367389=1, srv720809765=2, srv844390280=3, srv1685386324=0} racks are {rack=0} 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv28367389=1, srv720809765=2, srv844390280=3, srv1685386324=0} racks are {rack=0} 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv28367389=1, srv720809765=2, srv844390280=3, srv1685386324=0} racks are {rack=0} 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv28367389=1, srv720809765=2, srv844390280=3, srv1685386324=0} racks are {rack=0} 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv28367389=1, srv720809765=2, srv844390280=3, srv1685386324=0} racks are {rack=0} 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv28367389=1, srv720809765=2, srv844390280=3, srv1685386324=0} racks are {rack=0} 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-09T14:15:58,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1654394932=3, srv78839116=4, srv1033680649=0, srv1446991638=1, srv1618800311=2} racks are {rack=0} 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:58,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1654394932=3, srv78839116=4, srv1033680649=0, srv1446991638=1, srv1618800311=2} racks are {rack=0} 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1654394932=3, srv78839116=4, srv1033680649=0, srv1446991638=1, srv1618800311=2} racks are {rack=0} 2024-11-09T14:15:58,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1654394932=3, srv78839116=4, srv1033680649=0, srv1446991638=1, srv1618800311=2} racks are {rack=0} 2024-11-09T14:15:58,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:58,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-09T14:15:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
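Editor's note: a back-of-the-envelope check of why every table is skipped. Assuming the "weighted average imbalance" in the message is sum(multiplier * imbalance) / sum(multiplier) over the cost functions listed in the functionCost dump (that formula is an assumption for illustration, not taken from HBase source), all imbalances of 0.0 yield a weighted average of 0.0, which is below the 1.0 threshold regardless of the multipliers.

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // Multipliers as printed in the functionCost dump above; imbalances are all 0.0.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double weighted = 0.0, weightSum = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      weightSum += multipliers[i];
    }
    // Prints 0.0, matching "weighted average imbalance=0.0 <= threshold(1.0)".
    System.out.println(weighted / weightSum);
  }
}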
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-09T14:15:58,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-09T14:15:58,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-09T14:15:58,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-09T14:15:58,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-09T14:15:58,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-09T14:15:58,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-09T14:15:58,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-09T14:15:58,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-09T14:15:58,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-09T14:15:58,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-09T14:15:58,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-09T14:15:58,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-09T14:15:58,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-09T14:15:58,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-09T14:15:58,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-09T14:15:58,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-09T14:15:58,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-09T14:15:58,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-09T14:15:58,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-09T14:15:58,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-09T14:15:58,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-09T14:15:58,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-09T14:15:58,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-09T14:15:58,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-09T14:15:58,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-09T14:15:58,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-09T14:15:58,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
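The BalancerClusterState lines repeated in every block above report the same index mapping: each region server name gets an integer index, each server sits on its own host, and with a single rack every host maps to rack 0. The following is a minimal, self-contained sketch of building that mapping from the server names printed in the log; the class and variable names are invented for illustration and are not part of HBase.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch of the host/rack indexing the BalancerClusterState lines describe.
public class ClusterStateSketch {
    public static void main(String[] args) {
        // Server names in the order that reproduces the indices shown in "Hosts are {...}".
        List<String> servers = List.of("srv1184038933", "srv1328154761", "srv1352507215",
                "srv1494304553", "srv1760909778", "srv955897009");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String server : servers) {
            // Every server is its own host in this test, so host index == insertion order.
            hostIndex.putIfAbsent(server, hostIndex.size());
        }

        for (String server : servers) {
            int host = hostIndex.get(server);
            System.out.println("server " + host + " is on host " + host + ", rack 0");
        }
        System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
    }
}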
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-09T14:15:58,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-09T14:15:58,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-09T14:15:58,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-09T14:15:58,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-09T14:15:58,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-09T14:15:58,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-09T14:15:58,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-09T14:15:58,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-09T14:15:58,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-09T14:15:58,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-09T14:15:58,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-09T14:15:58,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-09T14:15:58,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-09T14:15:58,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-09T14:15:58,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-09T14:15:58,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-09T14:15:58,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-09T14:15:58,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-09T14:15:58,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-09T14:15:58,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-09T14:15:58,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-09T14:15:58,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-09T14:15:58,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-09T14:15:58,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-09T14:15:58,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-09T14:15:58,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-09T14:15:58,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-09T14:15:58,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-09T14:15:58,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-09T14:15:58,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-09T14:15:58,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-09T14:15:58,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-09T14:15:58,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-09T14:15:58,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-09T14:15:58,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-09T14:15:58,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-09T14:15:58,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-09T14:15:58,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-09T14:15:58,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-09T14:15:58,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-09T14:15:58,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-09T14:15:58,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-09T14:15:58,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-09T14:15:58,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-09T14:15:58,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-09T14:15:58,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-09T14:15:58,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-09T14:15:58,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-09T14:15:58,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-09T14:15:58,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-09T14:15:58,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-09T14:15:58,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-09T14:15:58,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-09T14:15:58,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-09T14:15:58,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-09T14:15:58,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-09T14:15:58,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-09T14:15:58,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-09T14:15:58,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-09T14:15:58,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-09T14:15:58,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-09T14:15:58,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-09T14:15:58,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-09T14:15:58,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-09T14:15:58,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-09T14:15:58,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-09T14:15:58,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-09T14:15:58,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-09T14:15:58,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-09T14:15:58,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-09T14:15:58,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-09T14:15:58,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-09T14:15:58,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-09T14:15:58,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-09T14:15:58,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-09T14:15:58,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-09T14:15:58,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-09T14:15:58,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-09T14:15:58,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-09T14:15:58,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-09T14:15:58,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:58,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-09T14:15:58,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-09T14:15:58,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-09T14:15:58,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-09T14:15:58,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:58,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-09T14:15:58,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-09T14:15:58,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-09T14:15:58,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-09T14:15:58,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:58,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:58,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-09T14:15:58,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-09T14:15:58,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-09T14:15:58,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:58,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:58,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-09T14:15:58,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-09T14:15:58,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-09T14:15:58,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-09T14:15:58,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-09T14:15:58,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-09T14:15:58,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-09T14:15:58,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-09T14:15:58,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-09T14:15:58,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-09T14:15:58,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-09T14:15:58,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:58,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:58,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-09T14:15:59,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-09T14:15:59,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1494304553=3, srv955897009=5, srv1184038933=0, srv1352507215=2, srv1328154761=1, srv1760909778=4} racks are {rack=0} 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-09T14:15:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-09T14:15:59,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-09T14:15:59,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-09T14:15:59,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-09T14:15:59,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-09T14:15:59,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-09T14:15:59,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-09T14:15:59,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-09T14:15:59,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-09T14:15:59,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-09T14:15:59,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-09T14:15:59,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-09T14:15:59,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-09T14:15:59,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-09T14:15:59,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-09T14:15:59,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-09T14:15:59,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-09T14:15:59,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-09T14:15:59,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-09T14:15:59,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-09T14:15:59,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-09T14:15:59,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-09T14:15:59,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-09T14:15:59,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-09T14:15:59,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-09T14:15:59,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-09T14:15:59,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-09T14:15:59,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-09T14:15:59,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-09T14:15:59,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-09T14:15:59,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-09T14:15:59,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-09T14:15:59,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-09T14:15:59,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-09T14:15:59,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-09T14:15:59,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-09T14:15:59,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-09T14:15:59,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-09T14:15:59,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-09T14:15:59,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-09T14:15:59,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-09T14:15:59,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-09T14:15:59,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-09T14:15:59,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-09T14:15:59,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-09T14:15:59,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-09T14:15:59,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-09T14:15:59,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-09T14:15:59,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-09T14:15:59,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-09T14:15:59,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-09T14:15:59,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-09T14:15:59,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-09T14:15:59,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-09T14:15:59,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-09T14:15:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-09T14:15:59,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-09T14:15:59,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-09T14:15:59,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-09T14:15:59,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-09T14:15:59,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-09T14:15:59,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-09T14:15:59,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-09T14:15:59,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-09T14:15:59,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-09T14:15:59,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-09T14:15:59,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-09T14:15:59,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-09T14:15:59,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-09T14:15:59,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-09T14:15:59,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-09T14:15:59,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-09T14:15:59,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-09T14:15:59,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-09T14:15:59,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-09T14:15:59,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-09T14:15:59,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-09T14:15:59,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-09T14:15:59,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-09T14:15:59,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-09T14:15:59,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-09T14:15:59,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-09T14:15:59,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-09T14:15:59,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-09T14:15:59,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-09T14:15:59,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-09T14:15:59,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-09T14:15:59,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-09T14:15:59,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-09T14:15:59,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-09T14:15:59,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-09T14:15:59,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-09T14:15:59,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-09T14:15:59,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-09T14:15:59,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-09T14:15:59,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-09T14:15:59,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-09T14:15:59,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-09T14:15:59,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-09T14:15:59,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-09T14:15:59,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-09T14:15:59,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-09T14:15:59,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-09T14:15:59,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-09T14:15:59,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-09T14:15:59,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-09T14:15:59,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-09T14:15:59,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-09T14:15:59,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-09T14:15:59,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-09T14:15:59,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-09T14:15:59,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-09T14:15:59,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-09T14:15:59,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-09T14:15:59,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-09T14:15:59,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv633083779=4, srv208870408=2, srv2100072300=3, srv1762957674=1, srv666650354=5, srv1596868297=0} racks are {rack=0} 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-09T14:15:59,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
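Editor's note on the repeated "skipping load balancing" entries above: the log itself suggests either lowering hbase.master.balancer.stochastic.minCostNeedBalance (shown here as threshold 1.0) or raising a cost-function multiplier. The snippet below is only a minimal sketch of how such settings could be applied through the standard HBase Configuration API; the 0.05 value and the "regionCountCost" key (assumed to back RegionCountSkewCostFunction, multiplier=500.0 in this log) are illustrative assumptions, not taken from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
    public static void main(String[] args) {
        // Sketch: build an HBase configuration and lower the balance threshold
        // named in the log message. 0.05 is an illustrative assumption; the
        // test above runs with threshold(1.0).
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // The other option the message mentions: raise a cost-function
        // multiplier. "regionCountCost" is assumed here to be the key behind
        // RegionCountSkewCostFunction (multiplier=500.0 in the log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```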
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
6 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 
is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 
is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 
is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 
is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
6 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
6 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv125377528=1, srv1132104059=0, srv1291929238=2, srv606525568=8, srv635740339=10, srv1502489633=4, srv396431116=6, srv1393300879=3, srv180121586=5, srv65668069=11, srv427111449=7, srv635372637=9, srv663031995=12, srv814749880=13, srv8178234=14} racks are {rack=0} 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
6 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-09T14:15:59,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,555 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
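The StochasticLoadBalancer(421) records above all report the same decision: the weighted average imbalance (0.0 here) does not exceed the minCostNeedBalance threshold (1.0 in this test run), so no balance plan is generated for the table. A minimal sketch of that kind of threshold check is shown below, under the assumption that it is simply a multiplier-weighted average of the per-cost-function imbalances; the class and method names are invented for illustration and this is not HBase's actual implementation.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: mimics the shape of the check reported in the
// StochasticLoadBalancer(421) log lines, not HBase's real code.
public class NeedsBalanceSketch {

    // Hypothetical input: cost-function name -> {multiplier, imbalance}.
    static boolean needsBalance(Map<String, double[]> functionCosts, double minCostNeedBalance) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : functionCosts.values()) {
            weightedSum += mi[0] * mi[1];   // multiplier * imbalance
            multiplierSum += mi[0];
        }
        double weightedAverageImbalance =
            multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
        // Corresponds to "skipping load balancing because weighted average
        // imbalance=0.0 <= threshold(1.0)" whenever this returns false.
        return weightedAverageImbalance > minCostNeedBalance;
    }

    public static void main(String[] args) {
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction", new double[] {7.0, 0.0});
        costs.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        System.out.println(needsBalance(costs, 1.0)); // false -> plan is skipped
    }
}
```

With every imbalance at 0.0, as in the runs above, the weighted average is 0.0 and the balancer skips the table, which is exactly what the log shows for each per-table pass.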
2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,558 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2032417809=6, srv1433187269=2, srv1628286878=4, srv1941979078=5, srv1005190378=0, srv522795891=8, srv972595717=9, srv1373582194=1, srv1507925471=3, srv340273618=7} racks are {rack=0} 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv628996403=7, srv1825919508=4, srv159370117=2, srv345392255=5, srv182374906=3, srv807640729=9, srv1097593640=0, srv513588893=6, srv1120297848=1, srv726117748=8} racks are {rack=0} 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv628996403=7, srv1825919508=4, srv159370117=2, srv345392255=5, srv182374906=3, srv807640729=9, srv1097593640=0, srv513588893=6, srv1120297848=1, srv726117748=8} racks are {rack=0} 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv628996403=7, srv1825919508=4, srv159370117=2, srv345392255=5, srv182374906=3, srv807640729=9, srv1097593640=0, srv513588893=6, srv1120297848=1, srv726117748=8} racks are {rack=0} 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,563 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv628996403=7, srv1825919508=4, srv159370117=2, srv345392255=5, srv182374906=3, srv807640729=9, srv1097593640=0, srv513588893=6, srv1120297848=1, srv726117748=8} racks are {rack=0} 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv628996403=7, srv1825919508=4, srv159370117=2, srv345392255=5, srv182374906=3, srv807640729=9, srv1097593640=0, srv513588893=6, srv1120297848=1, srv726117748=8} racks are {rack=0} 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv628996403=7, srv1825919508=4, srv159370117=2, srv345392255=5, srv182374906=3, srv807640729=9, srv1097593640=0, srv513588893=6, srv1120297848=1, srv726117748=8} racks are {rack=0} 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
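The skip message printed for every table also names the two knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. A hedged sketch of setting both through the Configuration API follows; the values 0.05 and 1000 are arbitrary illustrations, the regionCountCost property name is assumed from the RegionCountSkewCostFunction multiplier (500.0) seen above, and in a real deployment these settings would normally live in hbase-site.xml on the active master rather than being applied programmatically.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the tuning suggested by the StochasticLoadBalancer skip message.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Lower the skip threshold so smaller weighted imbalances still
        // trigger a balance plan (illustrative value).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Or weight one cost function more heavily, e.g. region-count skew
        // (assumed property name; illustrative value).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```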
2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,567 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,570 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,573 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,576 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,579 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,582 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,585 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,588 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,590 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,593 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
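[Editor's note] The INFO lines above repeatedly report that balancing is skipped because the weighted average imbalance (0.0) does not exceed the threshold (1.0), and they name hbase.master.balancer.stochastic.minCostNeedBalance as the property to lower for more aggressive balancing. The short Java sketch below shows one way that property could be lowered on an HBase Configuration; the class name and the 0.05 value are illustrative assumptions, and only the property name and its reported 1.0 default come from the log itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: lowers the threshold named in the log messages so the
// StochasticLoadBalancer would act on smaller imbalances. The 0.05 value is an
// assumption, not taken from the log; the default reported above is 1.0.
public class LowerBalanceThreshold {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```

On a live cluster the same property would typically be placed in hbase-site.xml on the master; nothing in this log confirms whether it can be changed without a restart, so treat that as deployment-specific.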
2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,596 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1324537921=2, srv122264585=1, srv1852458679=5, srv1894308080=6, srv156948856=3, srv1979011753=7, srv727968898=9, srv583283424=8, srv1698955960=4, srv1014258067=0} racks are {rack=0} 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
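[Editor's note] Each skip message also lists the per-cost-function multipliers (500.0, 7.0, 15.0, 35.0, 5.0, ...) alongside imbalances that are all 0.0 in this run. As a reading aid for those functionCost= lines, the sketch below computes a multiplier-weighted average of the imbalances; this is an illustrative interpretation of "weighted average imbalance", not necessarily the exact formula StochasticLoadBalancer uses internally. With every imbalance at 0.0, any such weighting is 0.0, which is why every table here stays below the 1.0 threshold and no balance plan is generated.

```java
// Illustrative only: a multiplier-weighted average of the imbalances printed in the
// functionCost= lines above. Not claimed to be HBase's internal cost formula.
public class WeightedImbalance {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // from the log
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0}; // from the log

    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      weightTotal += multipliers[i];
    }
    double weightedAverage = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

    // Prints 0.0, matching "weighted average imbalance=0.0 <= threshold(1.0)" in the log.
    System.out.println("weighted average imbalance = " + weightedAverage);
  }
}
```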
2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,599 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,602 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
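The repeated BalancerClusterState lines above describe how, before any cost is evaluated, each region server is assigned an integer host index and rack index; with ten distinct hosts and a single rack, every server gets its own host index and rack 0. The following standalone Java sketch reproduces that indexing from the server listing printed in the log (the class and variable names are illustrative, not HBase's actual BalancerClusterState code):

import java.util.LinkedHashMap;
import java.util.Map;

// Standalone illustration of the host/rack index assignment reported by the
// "server N is on host H" / "server N is on rack R" lines. Illustrative names,
// not HBase's BalancerClusterState implementation.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // Region servers in the order they received indices in the log above;
        // in this test each server is on its own host and all share one rack.
        String[] servers = {
            "srv1427580977", "srv1713144566", "srv1824747064", "srv1825507856",
            "srv1871296835", "srv1900496055", "srv24896890", "srv355288136",
            "srv47869354", "srv582010455"
        };
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        for (int serverIdx = 0; serverIdx < servers.length; serverIdx++) {
            String host = servers[serverIdx];   // host name == server name here
            String rack = "rack";               // single rack in this test cluster
            int h = hostIndex.computeIfAbsent(host, k -> hostIndex.size());
            int r = rackIndex.computeIfAbsent(rack, k -> rackIndex.size());
            System.out.println("server " + serverIdx + " is on host " + h);
            System.out.println("server " + serverIdx + " is on rack " + r);
        }
        System.out.println("Number of hosts=" + hostIndex.size()
            + ", number of racks=" + rackIndex.size());
    }
}

Running it prints the same "server N is on host N" / "server N is on rack 0" pairs and the hosts/racks summary seen in the entries above.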
2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
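Each "skipping load balancing" entry above compares a weighted average of the per-cost-function imbalances against the threshold hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). Below is a minimal sketch of that check, using the multipliers from the functionCost listing and assuming the average is computed as sum(multiplier * imbalance) / sum(multiplier); the exact aggregation inside StochasticLoadBalancer may differ.

// Minimal sketch of the "weighted average imbalance <= threshold" check logged
// above. The aggregation formula is an assumption, not HBase's exact code.
public class NeedsBalanceSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances as printed in the functionCost= listing.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

        double weightedSum = 0.0, weightTotal = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            weightTotal += multipliers[i];
        }
        double weightedAverageImbalance = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing because weighted average imbalance=%.1f"
                + " <= threshold(%.1f)%n", weightedAverageImbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}

With every imbalance at 0.0 the weighted average is 0.0, which is <= 1.0, so the plan for the table is skipped, matching the log output.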
2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,605 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
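The log message itself names the two tuning knobs: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the multiplier of the cost dimension that matters most. A sketch of what that could look like programmatically with a Hadoop Configuration follows; in practice these values would normally be set in hbase-site.xml on the master, and the multiplier key used here (regionCountCost) is the commonly documented key for RegionCountSkewCostFunction and should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;

// Sketch of the tuning options named in the log message above. Only
// minCostNeedBalance appears in the log itself; the regionCountCost key is an
// assumption based on commonly documented StochasticLoadBalancer settings.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Option 1: lower the skip threshold so smaller imbalances still produce a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Option 2: raise the weight of the cost dimension that matters most,
        // e.g. region count skew (multiplier=500.0 in the log output above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}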
2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,608 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
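"Slop is less than zero, not checking for sloppiness" refers to a quick pre-check that BaseLoadBalancer can perform before the stochastic search: a server is considered sloppy when its region count falls outside average * (1 +/- slop), and a negative slop disables the check entirely, which is what this test run does. A small illustrative sketch of that logic (not HBase's exact code; hbase.regions.slop is the usual config key for the slop value):

// Sketch of the "sloppiness" pre-check that BaseLoadBalancer logs about above.
// With a negative slop the check is skipped; otherwise a server is considered
// sloppy when its region count falls outside average * (1 +/- slop).
public class SlopCheckSketch {
    static boolean isSloppy(int[] regionsPerServer, float slop) {
        if (slop < 0) {
            System.out.println("Slop is less than zero, not checking for sloppiness.");
            return false;
        }
        double total = 0;
        for (int r : regionsPerServer) total += r;
        double avg = total / regionsPerServer.length;
        double floor = Math.floor(avg * (1 - slop));
        double ceiling = Math.ceil(avg * (1 + slop));
        for (int r : regionsPerServer) {
            if (r < floor || r > ceiling) return true;
        }
        return false;
    }

    public static void main(String[] args) {
        int[] regions = {12, 11, 13, 12, 12, 11, 13, 12, 12, 12}; // hypothetical counts
        System.out.println(isSloppy(regions, -1.0f)); // check skipped, as in the log
        System.out.println(isSloppy(regions, 0.2f));  // a typical hbase.regions.slop value
    }
}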
2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,611 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,613 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,616 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,619 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,622 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,624 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,627 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1825507856=3, srv47869354=8, srv24896890=6, srv355288136=7, srv1900496055=5, srv582010455=9, srv1871296835=4, srv1824747064=2, srv1713144566=1, srv1427580977=0} racks are {rack=0} 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
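
[Annotation] The repeated "skipping load balancing" records above list, for each cost function, a multiplier and an imbalance, and then compare a "weighted average imbalance" against threshold(1.0). The sketch below recomputes that number from the logged values under the assumption that it is a multiplier-weighted average over the cost functions that are "needed"; this is one plausible reading of the log line, not a copy of the actual StochasticLoadBalancer implementation, and the class name is made up for illustration.

// Illustrative only: recombines the (multiplier, imbalance) pairs printed in the
// functionCost= breakdown above; "(not needed)" entries are simply omitted.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    double[][] costs = {
        {500.0, 0.0},  // RegionCountSkewCostFunction
        {7.0,   0.0},  // MoveCostFunction
        {15.0,  0.0},  // RackLocalityCostFunction
        {35.0,  0.0},  // TableSkewCostFunction
        {5.0,   0.0},  // ReadRequestCostFunction
        {5.0,   0.0},  // WriteRequestCostFunction
        {5.0,   0.0},  // MemStoreSizeCostFunction
        {5.0,   0.0},  // StoreFileCostFunction
    };
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] c : costs) {
      weightedSum += c[0] * c[1];   // multiplier * imbalance
      multiplierSum += c[0];
    }
    double weightedAverageImbalance = weightedSum / multiplierSum;
    double minCostNeedBalance = 1.0; // threshold reported in the log
    System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, balance needed=%b%n",
        weightedAverageImbalance, minCostNeedBalance,
        weightedAverageImbalance > minCostNeedBalance);
  }
}

With every imbalance logged as 0.0, any weighting yields 0.0, which is why each table's plan generation ends in a skip.
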
2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,631 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,634 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,637 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
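
[Editor's note, not part of the captured log] The skip messages above repeatedly point at `hbase.master.balancer.stochastic.minCostNeedBalance` (threshold 1.0 in this run) and at the per-cost-function multipliers. The sketch below shows how one might tune those settings programmatically; it is illustrative only. The threshold key appears verbatim in the log, while `hbase.master.balancer.stochastic.regionCountCost` is the commonly documented key for the RegionCountSkewCostFunction multiplier and should be verified against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: make the stochastic balancer more willing to produce plans.
// Property names beyond minCostNeedBalance are assumptions based on the
// messages logged above; confirm them for your HBase release.
public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Log says: "skipping ... because weighted average imbalance=0.0 <= threshold(1.0)".
    // Lowering the threshold below the observed imbalance lets balancing proceed.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Alternatively raise the weight of the cost you care about, e.g. region
    // count skew (logged above with multiplier=500.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}
```
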
2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,639 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,641 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
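
[Editor's note, not part of the captured log] Each per-table decision above is a weighted comparison: every cost function reports an imbalance, the balancer weights it by its multiplier, and balancing is skipped while the weighted average stays at or below `minCostNeedBalance`. A minimal, self-contained sketch of that arithmetic follows; the function names and values simply mirror what the log prints and do not use HBase's actual classes.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the skip decision behind the log line
// "weighted average imbalance=0.0 <= threshold(1.0)". Values mirror the
// multipliers/imbalances printed above; illustrative only.
public class SkipDecisionSketch {
  public static void main(String[] args) {
    Map<String, double[]> costs = new LinkedHashMap<>(); // name -> {multiplier, imbalance}
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
    costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});

    double weighted = 0, totalWeight = 0;
    for (double[] mi : costs.values()) {
      weighted += mi[0] * mi[1];
      totalWeight += mi[0];
    }
    double weightedAverageImbalance = totalWeight == 0 ? 0 : weighted / totalWeight;

    double minCostNeedBalance = 1.0; // threshold(1.0) as reported in the log
    boolean skip = weightedAverageImbalance <= minCostNeedBalance;
    System.out.printf("weighted average imbalance=%.1f, skip=%b%n",
        weightedAverageImbalance, skip);
  }
}
```
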
2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,643 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,645 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,645 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
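[Editor's note] Every table in this run hits the same branch: the weighted average imbalance (0.0) never exceeds the threshold (1.0), so the balancer logs the skip message and never produces a plan. The message itself names the knob to change: hbase.master.balancer.stochastic.minCostNeedBalance. Below is a minimal, purely illustrative Java sketch of setting that property programmatically via the Hadoop/HBase Configuration API; the property key is taken verbatim from the log line, while the 0.05f value is an arbitrary example and in a real deployment the setting would normally live in the master's hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LowerBalanceThreshold {
        public static void main(String[] args) {
            // Start from the usual HBase configuration (hbase-default.xml + hbase-site.xml).
            Configuration conf = HBaseConfiguration.create();

            // In this test run the threshold is 1.0; any value below the observed
            // weighted average imbalance would let the balancer proceed instead of
            // logging "skipping load balancing". 0.05f is an illustrative choice only.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            System.out.println(
                conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }
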
2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,647 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,649 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
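The skip messages above all compare a "weighted average imbalance" of 0.0 against the threshold of 1.0 that this run uses for hbase.master.balancer.stochastic.minCostNeedBalance. The short, self-contained Java sketch below (not HBase code; class and method names are illustrative) recomputes that figure from one of the functionCost= strings, under the assumption that the reported value is the multiplier-weighted mean of the listed per-function imbalances, with the "(not needed)" functions excluded.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Minimal sketch (not HBase code): recomputes the "weighted average imbalance"
 * reported in the log from a functionCost= string, assuming it is the
 * multiplier-weighted mean of the per-function imbalances, with
 * "(not needed)" functions excluded. The result is compared against the
 * hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here).
 */
public class WeightedImbalanceCheck {
  private static final Pattern COST =
      Pattern.compile("multiplier=([0-9.]+), imbalance=([0-9.]+)");

  public static void main(String[] args) {
    // One functionCost= string as logged above, "(not needed)" entries omitted.
    String functionCost =
        "RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); "
            + "MoveCostFunction : (multiplier=7.0, imbalance=0.0); "
            + "RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); "
            + "TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); "
            + "ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); "
            + "WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); "
            + "MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); "
            + "StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);";

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    Matcher m = COST.matcher(functionCost);
    while (m.find()) {
      double multiplier = Double.parseDouble(m.group(1));
      double imbalance = Double.parseDouble(m.group(2));
      weightedSum += multiplier * imbalance;
      multiplierSum += multiplier;
    }

    double weightedAverage = multiplierSum == 0 ? 0 : weightedSum / multiplierSum;
    double minCostNeedBalance = 1.0; // threshold(1.0) taken from the log line
    System.out.printf("weighted average imbalance = %.3f, threshold = %.1f, balance? %b%n",
        weightedAverage, minCostNeedBalance, weightedAverage > minCostNeedBalance);
  }
}

Run against the strings in this log, every table yields 0.0, which is why each plan generation ends with a skip.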
2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,650 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
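Each skip message also names the two knobs for making the balancer act: lowering hbase.master.balancer.stochastic.minCostNeedBalance below the 1.0 used in this run, or raising the multiplier of a specific cost function. The fragment below is only an illustrative sketch of how a test or cluster configuration might do that; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key and the chosen values are assumptions that should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Illustrative sketch only: tuning the stochastic balancer to be more
 * aggressive, as the log message suggests. Not taken from this test.
 */
public class AggressiveBalancerConfig {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the skip threshold from the 1.0 used in this run (assumed value).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Weight region-count skew more heavily; key name and default (500) are
    // assumptions to check against the HBase version in use.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }

  public static void main(String[] args) {
    Configuration conf = tuned();
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    System.out.println(conf.get("hbase.master.balancer.stochastic.regionCountCost"));
  }
}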
2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,652 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1263401431=0, srv42897692=7, srv1520379401=1, srv164267474=3, srv921009672=9, srv2076760022=5, srv1778379261=4, srv915416905=8, srv1583162924=2, srv2101770004=6} racks are {rack=0} 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
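From this point the blocks switch to a different server set (srv870655213, srv1096970718, ...), but each block keeps the same shape: a "Hosts are {...} racks are {rack=0}" map, one line per server-to-host and server-to-rack assignment, and a summary line with the host and rack counts. The small, hypothetical helper below only parses such a log line into name-to-index maps so the counts in the summary can be cross-checked; it is a log-reading aid, not HBase code.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Log-reading aid (not HBase code): parses a "Hosts are {...} racks are {...}"
 * line into name -> index maps, so the "Number of ... hosts=10, number of
 * racks=1" summary that follows can be verified at a glance.
 */
public class ClusterStateLineParser {
  private static final Pattern ENTRY = Pattern.compile("(\\w+)=(\\d+)");

  static Map<String, Integer> parseMap(String braced) {
    Map<String, Integer> out = new LinkedHashMap<>();
    Matcher m = ENTRY.matcher(braced);
    while (m.find()) {
      out.put(m.group(1), Integer.parseInt(m.group(2)));
    }
    return out;
  }

  public static void main(String[] args) {
    // Host map copied from the table13 block above.
    String line = "Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, "
        + "srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, "
        + "srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0}";
    String hostsPart = line.substring(line.indexOf('{') + 1, line.indexOf('}'));
    String racksPart = line.substring(line.lastIndexOf('{') + 1, line.lastIndexOf('}'));

    Map<String, Integer> hosts = parseMap(hostsPart);
    Map<String, Integer> racks = parseMap(racksPart);
    System.out.println("number of hosts=" + hosts.size() + ", number of racks=" + racks.size());
  }
}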
2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,654 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,656 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
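[Editor's note] The repeated StochasticLoadBalancer records above all state that balancing is skipped because the "weighted average imbalance" of the cost functions is at or below the minCostNeedBalance threshold (1.0 in this run). The standalone sketch below, which is not the actual HBase code path, only an illustration derived from the logged functionCost= values, shows how such a multiplier-weighted average could be computed and compared to the threshold. The class name WeightedImbalanceSketch and the array layout are assumptions for the example.

```java
// Illustrative only: recomputes the "weighted average imbalance" reported in the
// StochasticLoadBalancer log lines above from the per-function multipliers and
// imbalances in the functionCost= summary. Standalone sketch, not HBase code.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // (multiplier, imbalance) pairs taken from the log output above;
        // functions reported as "(not needed)" contribute nothing here.
        double[][] functions = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] f : functions) {
            weightedSum += f[0] * f[1];   // multiplier * imbalance
            multiplierSum += f[0];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum;
        // Threshold shown in the log: hbase.master.balancer.stochastic.minCostNeedBalance = 1.0
        double minCostNeedBalance = 1.0;
        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skip balancing: imbalance " + weightedAverageImbalance
                + " <= threshold " + minCostNeedBalance);
        } else {
            System.out.println("generate a balance plan");
        }
    }
}
```

With every imbalance at 0.0, the weighted average is 0.0, which is why each per-table block above ends with the "skipping load balancing" message rather than a balance plan.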
2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,658 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv870655213=8, srv1096970718=0, srv1118494493=1, srv1615871710=3, srv750211677=6, srv1612791650=2, srv1844134182=4, srv874582122=9, srv356758188=5, srv754723708=7} racks are {rack=0} 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,660 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
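The repeated "server N is on host N" and "server N is on rack 0" lines come from the balancer indexing the topology listed in the preceding "Hosts are {...} racks are {rack=0}" entry before it costs any moves: with ten distinct hostnames and a single rack, every pass ends with "Number of tables=1, number of hosts=10, number of racks=1". A minimal, purely illustrative sketch of that kind of indexing follows; the class name is hypothetical (this is not the actual BalancerClusterState code) and the server names are copied from the log.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch only: give each server a host index and a rack index,
// mirroring the "server N is on host N / rack 0" lines in this log, where
// every server is its own host and all hosts share one rack.
public class ClusterTopologyIndexSketch {
  public static void main(String[] args) {
    List<String> servers = List.of("srv1302300572", "srv1442727065", "srv1568007309");
    Map<String, Integer> hostIndex = new HashMap<>();
    Map<String, Integer> rackIndex = new HashMap<>();

    for (int server = 0; server < servers.size(); server++) {
      String hostname = servers.get(server); // one host per server in this test
      String rack = "rack";                  // single rack in this test
      int host = hostIndex.computeIfAbsent(hostname, h -> hostIndex.size());
      int rackId = rackIndex.computeIfAbsent(rack, r -> rackIndex.size());
      System.out.println("server " + server + " is on host " + host);
      System.out.println("server " + server + " is on rack " + rackId);
    }
    System.out.println("Number of hosts=" + hostIndex.size()
        + ", number of racks=" + rackIndex.size());
  }
}
```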
2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
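Each "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" record is the balancer deciding that a plan is not worth generating for that table. A plausible reading of that check, reconstructed from the multipliers and imbalances printed in the functionCost list, is a multiplier-weighted average compared against minCostNeedBalance; the sketch below is a simplified model under that assumption, not the actual StochasticLoadBalancer source.

```java
// Simplified sketch: weighted average of per-cost-function imbalances,
// compared against the minCostNeedBalance threshold (1.0 in this test run).
public class WeightedImbalanceSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double total = 0.0;
    double sumMultiplier = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      if (multipliers[i] <= 0) {
        continue; // "(not needed)" cost functions contribute nothing
      }
      total += multipliers[i] * imbalances[i];
      sumMultiplier += multipliers[i];
    }
    return sumMultiplier == 0 ? 0.0 : total / sumMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances taken from the functionCost list in the log.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    double imbalance = weightedAverageImbalance(multipliers, imbalances);
    double minCostNeedBalance = 1.0;
    System.out.println("imbalance=" + imbalance
        + ", balance needed: " + (imbalance > minCostNeedBalance));
  }
}
```

With every per-function imbalance at 0.0 the weighted average is 0.0, which is why each per-table pass in this log ends in a skip message rather than a set of region moves.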
2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,663 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1711945633=3, srv612050213=9, srv2044488506=6, srv2082858678=7, srv192071701=4, srv1442727065=1, srv1302300572=0, srv1568007309=2, srv196819130=5, srv597983143=8} racks are {rack=0} 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
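The skip message itself names the first knob: lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 if more aggressive balancing is wanted. In a test or client that builds its own Configuration, that could be done programmatically as sketched below; the value 0.05 is an arbitrary illustrative choice, and on a real cluster the property would normally be set in the master's hbase-site.xml instead.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: lower the threshold so the stochastic balancer acts on smaller imbalances.
public class LowerBalanceThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The log reports the threshold as 1.0; 0.05 is just a more aggressive example value.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```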
2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,665 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
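The second knob the message names is the relative multiplier of individual cost functions; the functionCost list shows RegionCountSkewCostFunction at 500.0, TableSkewCostFunction at 35.0, MoveCostFunction at 7.0, and so on. Each multiplier is typically driven by a stochastic-balancer configuration property. The sketch below uses the commonly documented property names as an assumption; verify them against the HBase release in use before relying on them.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: raise the relative weight of selected cost functions.
// Property names are assumed from common HBase documentation; confirm for your version.
public class RaiseCostMultipliersSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f); // log shows 500.0
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 70f);     // log shows 35.0
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 14f);          // log shows 7.0
    System.out.println("regionCountCost="
        + conf.get("hbase.master.balancer.stochastic.regionCountCost"));
  }
}
```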
2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,667 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv598472931=7, srv608678616=8, srv1398554718=1, srv1717445246=3, srv440101370=6, srv775933705=9, srv153873697=2, srv129086256=0, srv1940513029=4, srv364849432=5} racks are {rack=0} 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
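
[Editor's note] The repeated StochasticLoadBalancer(421) entries above all report weighted average imbalance=0.0 against threshold(1.0), so every per-table balance plan is skipped. The short, self-contained Java sketch below illustrates that arithmetic using the multipliers and imbalances printed in the functionCost list; the exact weighting used inside StochasticLoadBalancer is not shown in this log, so the averaging formula here is an illustrative assumption, not the balancer's actual code.

    // Rough illustration of the skip decision logged above: with every active cost
    // function reporting imbalance=0.0, any multiplier-weighted average is 0.0,
    // which is <= threshold(1.0), so no balance plan is generated for the table.
    // NOTE: the weighting formula is an assumption for illustration only.
    public final class WeightedImbalanceSketch {
        public static void main(String[] args) {
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // from functionCost
            double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0}; // all 0.0 in this run
            double weightedSum = 0.0;
            double totalWeight = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weightedSum += multipliers[i] * imbalances[i];
                totalWeight += multipliers[i];
            }
            double weightedAverageImbalance = totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
            double minCostNeedBalance = 1.0; // threshold(1.0) reported in the log
            System.out.printf("weighted average imbalance=%.1f, skip balancing=%b%n",
                weightedAverageImbalance, weightedAverageImbalance <= minCostNeedBalance);
        }
    }
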
2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,669 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,671 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv330620197=7, srv1922868727=3, srv1999438941=4, srv1479705968=1, srv1519789508=2, srv728699027=9, srv1224666731=0, srv2051416982=5, srv2146667853=6, srv390788703=8} racks are {rack=0} 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,675 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,677 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
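[Editor's example] The "skipping load balancing" entries above report a weighted average imbalance of 0.0 against a threshold of 1.0, alongside a per-function list of multipliers and imbalances. The short Java sketch below shows how that weighted average is plausibly derived from the figures printed in the functionCost= list (sum of multiplier * imbalance divided by the sum of multipliers); the class and variable names are illustrative only and are not the actual StochasticLoadBalancer internals.

    // Illustrative sketch of the "weighted average imbalance=0.0 <= threshold(1.0)" check.
    // Values are taken from the functionCost= list in the log; names are hypothetical.
    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // Multipliers of the cost functions marked as needed in the log
            // (RegionCountSkew, Move, RackLocality, TableSkew, ReadRequest,
            //  WriteRequest, MemStoreSize, StoreFile) and their imbalances.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
            double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

            double weightedSum = 0.0;
            double multiplierSum = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weightedSum += multipliers[i] * imbalances[i];
                multiplierSum += multipliers[i];
            }
            double weightedAverage = multiplierSum > 0 ? weightedSum / multiplierSum : 0.0;

            // With every imbalance at 0.0 the weighted average is 0.0 <= 1.0,
            // so plan generation is skipped for the table, matching the log.
            System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, skip=%b%n",
                weightedAverage, minCostNeedBalance, weightedAverage <= minCostNeedBalance);
        }
    }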
2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,679 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-09T14:15:59,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,680 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
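[Editor's note, not part of the captured log] The StochasticLoadBalancer(421) entries above report a "weighted average imbalance" that is compared against the minCostNeedBalance threshold (1.0 in this run); when the average is at or below the threshold, no balance plan is generated for that table. The sketch below only illustrates how such a weighted average can be recomputed from the (multiplier, imbalance) pairs printed in functionCost; the class and method names are hypothetical helpers, not HBase internals.

// Illustrative sketch only: recombines the (multiplier, imbalance) pairs from the
// functionCost listing above into a weighted average and applies the same skip test.
public final class WeightedImbalanceSketch {

    /** Weighted average of per-cost-function imbalances, weighted by their multipliers. */
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // Values as printed in the log (cost functions marked "not needed" are omitted).
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

        double avg = weightedAverageImbalance(multipliers, imbalances);
        double threshold = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance in this run

        // Mirrors the decision logged above: 0.0 <= 1.0, so balancing is skipped.
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): skip=%b%n",
                avg, threshold, avg <= threshold);
    }
}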
2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,682 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,684 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-09T14:15:59,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
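[Editor's note, not part of the captured log] Each skipped-balancing entry names two remedies: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of a specific cost function. The sketch below shows how either setting might be applied through the standard HBase Configuration API. The minCostNeedBalance key is quoted directly from the log; the multiplier key shown for RegionCountSkewCostFunction is the commonly used one but should be verified against your HBase version.

// Sketch, assuming hbase-common on the classpath; values chosen only for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Remedy 1: lower the threshold so smaller imbalances still trigger balancing.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Remedy 2: weight a specific cost function more heavily (here: region count skew).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}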
2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,686 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
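(Editor's note: the repeated "skipping load balancing" lines above compare a weighted average imbalance against the minCostNeedBalance threshold. Below is a minimal, illustrative Java sketch of that arithmetic using only the multipliers and imbalances printed in the log line itself; the class and variable names are ours, not HBase internals.)

public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // Multipliers taken from the functionCost= listing above; cost functions
        // reported as "(not needed)" are excluded from the weighting.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // the "threshold(1.0)" in the message

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            multiplierSum += multipliers[i];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 in this run

        // Matches the log: 0.0 <= threshold(1.0), so the per-table plan is skipped.
        System.out.println("weighted average imbalance=" + weightedAverageImbalance
            + ", skip=" + (weightedAverageImbalance <= minCostNeedBalance));
    }
}

With every per-function imbalance at 0.0, the weighted average is 0.0, which is why every table in this run is skipped.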
2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,688 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,690 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
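(Editor's note: the message repeatedly suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost-function multiplier for more aggressive balancing. A hedged sketch of doing that programmatically follows; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is an assumed name for the RegionCountSkewCostFunction multiplier and should be verified against the HBase version in use.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    // Builds a Configuration that would make the StochasticLoadBalancer act on
    // smaller imbalances than the 1.0 threshold seen throughout this log.
    public static Configuration aggressiveBalancerConf() {
        Configuration conf = HBaseConfiguration.create();
        // Key copied from the log message; lowering it lets small imbalances
        // still produce a balance plan instead of being skipped.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed property name for the RegionCountSkewCostFunction multiplier
        // (500.0 in this run); raising it weights region-count skew more heavily.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}

The same keys can equally be set in hbase-site.xml on the master before restarting it.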
2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,692 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
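[editor's note] The repeated INFO message above names the tuning knob directly: with hbase.master.balancer.stochastic.minCostNeedBalance left at 1.0 and the weighted average imbalance at 0.0, every per-table plan in this run is skipped. The snippet below is only an illustrative sketch of how that threshold (and, as an assumption, one cost-function multiplier key) could be tuned in a configuration; it is not taken from this test's code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Key copied verbatim from the log message: lowering it below the observed
        // weighted imbalance would make the balancer emit a plan instead of skipping.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
        // Assumed multiplier key for RegionCountSkewCostFunction (shown as
        // multiplier=500.0 in the functionCost dump); raising it weights
        // region-count skew more heavily relative to the other cost functions.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}
```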
2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,694 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
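[editor's note] Every functionCost dump in this run reports imbalance=0.0 for each weighted function, so "weighted average imbalance=0.0 <= threshold(1.0)" follows directly. A minimal sketch of that weighted-average check, reconstructed from the logged (multiplier, imbalance) pairs; the actual StochasticLoadBalancer internals may differ:

```java
// Illustrative only: recomputes the weighted average imbalance reported in the
// log from the (multiplier, imbalance) pairs of the functionCost dump above.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        double[][] functions = {
            {500.0, 0.0},  // RegionCountSkewCostFunction
            {7.0,   0.0},  // MoveCostFunction
            {15.0,  0.0},  // RackLocalityCostFunction
            {35.0,  0.0},  // TableSkewCostFunction
            {5.0,   0.0},  // ReadRequestCostFunction
            {5.0,   0.0},  // WriteRequestCostFunction
            {5.0,   0.0},  // MemStoreSizeCostFunction
            {5.0,   0.0},  // StoreFileCostFunction
        };
        double weighted = 0.0, totalWeight = 0.0;
        for (double[] f : functions) {
            weighted += f[0] * f[1];
            totalWeight += f[0];
        }
        double average = weighted / totalWeight;   // 0.0 for this run
        double minCostNeedBalance = 1.0;           // "threshold(1.0)" from the log
        System.out.println(average <= minCostNeedBalance
            ? "skip balancing (matches the log)"
            : "generate a balance plan");
    }
}
```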
2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,696 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
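[editor's note] The recurring DEBUG line "Slop is less than zero, not checking for sloppiness" means the cheap sloppiness shortcut in BaseLoadBalancer is bypassed before the stochastic cost evaluation runs. The sketch below assumes the standard slop key hbase.regions.slop and a negative value as the way to express that; whether this particular test configures it this way is not shown in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Assumption: "hbase.regions.slop" is the balancer slop key; a negative value
// corresponds to the repeated DEBUG line "Slop is less than zero, not checking
// for sloppiness", i.e. the sloppiness pre-check is skipped entirely.
public class SlopConfigSketch {
    public static Configuration withDisabledSloppinessCheck() {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.regions.slop", -1.0f);
        return conf;
    }
}
```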
2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,698 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
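The functionCost listing above pairs each cost function with a multiplier and an imbalance, and the balancer skips a table whenever the weighted average imbalance stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). Below is a minimal, purely illustrative sketch of that comparison using the multipliers and imbalances copied from the log; the exact aggregation inside StochasticLoadBalancer may differ, so treat this as an approximation rather than the balancer's actual code path.

```java
// Illustrative sketch: weighted average of per-cost-function imbalances,
// compared against the minCostNeedBalance threshold (1.0 in this log).
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost= line above
        // (cost functions marked "not needed" are omitted).
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            multiplierSum += multipliers[i];
        }

        double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 here
        double threshold = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

        System.out.println(weightedAverageImbalance <= threshold
            ? "skip balancing (as logged above)"
            : "generate balance plan");
    }
}
```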
2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-09T14:15:59,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,700 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,701 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,704 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
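[Editor's note - illustrative sketch, not part of the test log.] The skip message repeated above names the knobs that control this decision: hbase.master.balancer.stochastic.minCostNeedBalance (set to 1.0 in this run) and the per-cost-function multipliers (500, 35, 15, 7, 5, ...). A minimal, hypothetical Java sketch of how those standard HBase configuration keys could be tuned is shown below; the specific values chosen here are assumptions for illustration only.

```java
// Illustrative sketch only (not from this test): tuning the knobs named in the
// StochasticLoadBalancer skip message via standard HBase configuration keys.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the "needs balance" threshold so smaller imbalances trigger a plan
    // (this run used 1.0, so a weighted average imbalance of 0.0 is always skipped).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or raise individual cost-function multipliers relative to the values
    // reported in the log (RegionCountSkewCostFunction=500, TableSkewCostFunction=35, ...).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 70f);
    return conf;
  }
}
```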
2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,705 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,707 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
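[Editor's note - illustrative sketch, not part of the test log.] The "weighted average imbalance=0.0 <= threshold(1.0)" check logged at StochasticLoadBalancer(421) can be read as a weighted average of the per-cost-function imbalances shown in the functionCost line, with "(not needed)" functions contributing nothing. The sketch below mirrors that arithmetic as the message describes it; it is an assumption-level illustration, not the actual StochasticLoadBalancer implementation.

```java
// Rough illustration of the decision logged at StochasticLoadBalancer(421):
// a weighted average of per-cost-function imbalance is compared against the
// minCostNeedBalance threshold. This mirrors the message text, not the real code.
final class NeedsBalanceSketch {
  static boolean needsBalance(double[] multipliers, double[] imbalances, double threshold) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      if (multipliers[i] <= 0) {
        continue; // functions reported as "(not needed)" contribute nothing
      }
      weightedSum += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    double weightedAverageImbalance = totalWeight == 0 ? 0.0 : weightedSum / totalWeight;
    return weightedAverageImbalance > threshold; // 0.0 <= 1.0 above, so balancing is skipped
  }

  public static void main(String[] args) {
    // Values taken from the functionCost line above: every imbalance is 0.0, threshold is 1.0.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances = new double[multipliers.length];
    System.out.println(needsBalance(multipliers, imbalances, 1.0)); // prints false -> plan skipped
  }
}
```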
2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,709 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,711 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,713 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,715 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
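[editor's note] The skip decisions logged above compare a "weighted average imbalance" against the threshold (1.0 in this test run). A minimal sketch of how such a weighted average can be formed from the (multiplier, imbalance) pairs printed in the functionCost breakdown follows; the class, method, and array contents are illustrative assumptions for reading the log, not HBase's actual internal API.

```java
// Illustrative only: recomputes a "weighted average imbalance" from the
// (multiplier, imbalance) pairs shown in the functionCost line above.
// The helper is hypothetical; the numbers mirror the logged values.
public final class WeightedImbalance {
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers from the log: RegionCountSkew=500, Move=7, RackLocality=15,
        // TableSkew=35, Read/Write/MemStore/StoreFile=5 each; every imbalance is 0.0,
        // so the weighted average is 0.0 <= threshold(1.0) and balancing is skipped.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        System.out.println(weightedAverageImbalance(multipliers, imbalances)); // 0.0
    }
}
```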
2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,717 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,718 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1913517618=4, srv1644359955=3, srv544086298=7, srv950088869=9, srv1174482748=0, srv1515114687=2, srv367737539=6, srv1430958246=1, srv2090963203=5, srv907728264=8} racks are {rack=0} 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
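[editor's note] Each skip message above points at two tuning knobs: lowering hbase.master.balancer.stochastic.minCostNeedBalance (that property name is taken verbatim from the log) or raising a cost function's multiplier. A minimal sketch of setting those properties programmatically, assuming the standard HBase client Configuration API; the regionCountCost key and the concrete values are assumptions for illustration, and in practice these settings normally live in hbase-site.xml on the master.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class BalancerTuning {
    public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Lower the skip threshold logged above (1.0 in this test) so smaller
        // weighted-average imbalances still produce a balance plan.
        // 0.05 is an illustrative value, not a recommendation from the log.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, weight region-count skew more heavily. This property key
        // is an assumption (commonly associated with RegionCountSkewCostFunction,
        // whose multiplier the log shows as 500.0).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```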
2024-11-09T14:15:59,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,722 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
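
The BalancerClusterState lines above show how the test cluster is indexed before each plan is generated: ten servers, one host per server, and a single default rack, giving "Number of tables=1, number of hosts=10, number of racks=1". The following is a minimal Java sketch of that indexing; it is not HBase code, only an illustration of the server-to-host and server-to-rack mapping the log reports.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Minimal sketch (not HBase source) of the host/rack indexing reflected in
    // the BalancerClusterState log lines: each server gets its own host index,
    // and with no rack topology configured every server lands on rack 0.
    public class ClusterStateSketch {
        public static void main(String[] args) {
            String[] servers = {
                "srv1111174865", "srv129866321", "srv1322675580", "srv1502645354",
                "srv1600306260", "srv1958667698", "srv290204600", "srv837015462",
                "srv967019451", "srv996791137"
            };
            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();
            for (String server : servers) {
                // one host per server in this test topology
                hostIndex.putIfAbsent(server, hostIndex.size());
                // single default rack, so every server maps to rack 0
                rackIndex.putIfAbsent("rack", 0);
                System.out.println("server " + hostIndex.get(server)
                    + " is on host " + hostIndex.get(server)
                    + ", rack " + rackIndex.get("rack"));
            }
            System.out.println("Number of hosts=" + hostIndex.size()
                + ", number of racks=" + rackIndex.size());
        }
    }
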
2024-11-09T14:15:59,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,724 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
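
Every table in this run is skipped with the same reasoning: the weighted average imbalance across the enabled cost functions is 0.0, which does not exceed the threshold of 1.0. The sketch below reproduces that check under an assumed formula (a multiplier-weighted mean of the per-function imbalances); the exact computation inside StochasticLoadBalancer may differ, but the inputs are the multiplier/imbalance pairs printed in the functionCost line.

    // Illustrative sketch (assumed formula, not lifted from HBase source) of the
    // "weighted average imbalance" check the log reports: each enabled cost
    // function contributes its imbalance weighted by its multiplier, and
    // balancing is skipped when the weighted average stays at or below the
    // minCostNeedBalance threshold (1.0 in this test).
    public class ImbalanceCheckSketch {
        public static void main(String[] args) {
            // multiplier / imbalance pairs as printed in the functionCost line
            double[][] costs = {
                {500.0, 0.0}, // RegionCountSkewCostFunction
                {7.0,   0.0}, // MoveCostFunction
                {15.0,  0.0}, // RackLocalityCostFunction
                {35.0,  0.0}, // TableSkewCostFunction
                {5.0,   0.0}, // ReadRequestCostFunction
                {5.0,   0.0}, // WriteRequestCostFunction
                {5.0,   0.0}, // MemStoreSizeCostFunction
                {5.0,   0.0}  // StoreFileCostFunction
            };
            double weighted = 0.0, totalWeight = 0.0;
            for (double[] c : costs) {
                weighted += c[0] * c[1];
                totalWeight += c[0];
            }
            double weightedAverageImbalance = weighted / totalWeight;
            double minCostNeedBalance = 1.0; // threshold shown in the log
            boolean skip = weightedAverageImbalance <= minCostNeedBalance;
            System.out.println("weighted average imbalance=" + weightedAverageImbalance
                + " <= threshold(" + minCostNeedBalance + ") -> skip=" + skip);
        }
    }
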
2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,726 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
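
The log message itself names the two knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of a specific cost function. The sketch below illustrates that tuning; minCostNeedBalance is taken verbatim from the message, while hbase.master.balancer.stochastic.regionCountCost is an assumed property name for the RegionCountSkewCostFunction weight and should be verified against the HBase version in use. In a real deployment these values would normally be set in hbase-site.xml on the master rather than in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of the tuning suggested by the balancer's own log message.
    // Property names other than minCostNeedBalance are assumptions; confirm
    // them against the running HBase version before relying on them.
    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Trigger balancing on smaller imbalances than the 1.0 threshold in this test.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Weight region-count skew more heavily (the log shows a multiplier of 500.0).
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            System.out.println("minCostNeedBalance="
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }
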
2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,727 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,729 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,731 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,733 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,735 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,737 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,739 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,740 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,742 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,744 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
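Editor's note: the repeated "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" entries above summarise the balancer's decision for each table: every cost function reports an imbalance, the multipliers weight them, and the weighted average is compared against the minCostNeedBalance threshold. The following is a minimal illustrative re-creation of that comparison, not the actual StochasticLoadBalancer code; the class name WeightedImbalanceSketch and the data layout are assumptions, while the multipliers and the 1.0 threshold are taken from the log lines above.

import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {

    // Weighted average of per-function imbalances; the weights are the multipliers.
    static double weightedAverageImbalance(Map<String, double[]> costFunctions) {
        double weighted = 0.0;
        double totalWeight = 0.0;
        for (double[] multiplierAndImbalance : costFunctions.values()) {
            weighted += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            totalWeight += multiplierAndImbalance[0];
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers mirror the functionCost line in the log; every imbalance
        // is 0.0 in this test, which is why every table is skipped.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0, 0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0, 0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0, 0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0, 0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0, 0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0, 0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0, 0.0});

        double minCostNeedBalance = 1.0; // "threshold(1.0)" in the log
        double imbalance = weightedAverageImbalance(costs);
        if (imbalance <= minCostNeedBalance) {
            System.out.println("skip balancing: " + imbalance + " <= " + minCostNeedBalance);
        } else {
            System.out.println("generate a balance plan");
        }
    }
}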
2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,746 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is 
on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
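Editor's note: each skip entry suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or raising a cost function multiplier to get more aggressive balancing. The fragment below is a hedged sketch of how those properties could be set on a Hadoop Configuration; the value 0.05 is only an example, and the multiplier key hbase.master.balancer.stochastic.regionCountCost is my assumption for RegionCountSkewCostFunction (its 500.0 default matches the log) rather than something this log states.

import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Lower the threshold the log calls "threshold(1.0)"; smaller values
        // make the StochasticLoadBalancer produce plans for smaller imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, weight one cost function more heavily. The key below is
        // assumed to map to RegionCountSkewCostFunction (default multiplier 500.0).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(
            conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}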
2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,748 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
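Editor's note: the BalancerClusterState lines above assign every server an integer host index and rack index; in this test each server is its own host and all ten servers share a single rack, which is why every block ends with "Number of tables=1, number of hosts=10, number of racks=1". The snippet below re-derives those indices for illustration only; it is not the BalancerClusterState implementation, and the class name ClusterIndexSketch is made up.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        // Server names taken from the "Hosts are {...}" entries above.
        List<String> servers = List.of(
            "srv1111174865", "srv129866321", "srv1322675580", "srv1502645354",
            "srv1600306260", "srv1958667698", "srv290204600", "srv837015462",
            "srv967019451", "srv996791137");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();

        for (int server = 0; server < servers.size(); server++) {
            String host = servers.get(server); // one host per server in this test
            String rack = "rack";              // single rack in this test
            if (!hostIndex.containsKey(host)) {
                hostIndex.put(host, hostIndex.size());
            }
            if (!rackIndex.containsKey(rack)) {
                rackIndex.put(rack, rackIndex.size());
            }
            System.out.printf("server %d is on host %d%n", server, hostIndex.get(host));
            System.out.printf("server %d is on rack %d%n", server, rackIndex.get(rack));
        }
        System.out.printf("number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), rackIndex.size());
    }
}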
2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,749 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
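[annotation] The StochasticLoadBalancer(421) entries repeated throughout this output record the per-table decision: the weighted average imbalance (0.0 for every table in this run) is compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here), and plan generation is skipped when the imbalance does not exceed that threshold. Below is a minimal sketch of how the threshold could be lowered for more aggressive balancing, assuming a standard HBase client Configuration; the property name is taken from the log message itself, while the 0.05 value is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LowerBalancerThreshold {
      public static void main(String[] args) {
        // Load hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Property named in the log: lowering it below the observed imbalance
        // makes the stochastic balancer generate a plan instead of skipping.
        // 0.05 is an illustrative value, not one taken from this test run.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }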
2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,752 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
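[annotation] The functionCost listing repeated for each table gives the per-function multipliers (RegionCountSkew 500, TableSkew 35, RackLocality 15, Move 7, and 5 each for the read, write, memstore, and storefile functions) with an imbalance of 0.0 for every active function. Read as a multiplier-weighted mean, that yields the weighted average imbalance of 0.0 reported in the skip message. The small sketch below only works through that arithmetic; it is an interpretation of the log output, not a reproduction of the balancer's internal code.

    public class WeightedImbalance {
      // Multipliers and imbalances copied from the functionCost listing in this log
      // (functions reported as "not needed" are omitted).
      static final double[] MULTIPLIERS = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
      static final double[] IMBALANCES  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

      public static void main(String[] args) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < MULTIPLIERS.length; i++) {
          weightedSum += MULTIPLIERS[i] * IMBALANCES[i];
          multiplierSum += MULTIPLIERS[i];
        }
        // With every imbalance at 0.0 this prints 0.0, matching
        // "weighted average imbalance=0.0 <= threshold(1.0)".
        System.out.println(weightedSum / multiplierSum);
      }
    }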
2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,753 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
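[annotation] The BalancerClusterState(202)-(319) lines describe how the cluster is indexed before costing: each of the ten srv* servers is assigned a host index, every host maps to the single rack ("rack"), and the summary line reports tables=1, hosts=10, racks=1. The following is a rough illustration of that server-to-host-to-rack indexing, assuming simple hash maps rather than HBase's actual internal arrays; the ordering of the server list is illustrative and may not match the index assignment shown in the log.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ClusterIndexingSketch {
      public static void main(String[] args) {
        String[] servers = {
          "srv1111174865", "srv129866321", "srv1322675580", "srv1502645354", "srv1600306260",
          "srv1958667698", "srv290204600", "srv837015462", "srv967019451", "srv996791137"
        };

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        rackIndex.put("rack", 0); // single rack, as in "racks are {rack=0}"

        for (String server : servers) {
          // One server per host in this test, so each server gets its own host index,
          // mirroring the "server N is on host N" lines.
          hostIndex.putIfAbsent(server, hostIndex.size());
          // Every host sits on the only rack, hence "server N is on rack 0".
        }

        System.out.println("hosts=" + hostIndex.size() + ", racks=" + rackIndex.size());
      }
    }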
2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,755 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
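The "skipping load balancing" records above compare a weighted average of the per-cost-function imbalances against the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this run); with every imbalance at 0.0 the average is 0.0 and every table is skipped. Below is a minimal sketch of that arithmetic, assuming the weighted average is simply sum(multiplier x imbalance) / sum(multiplier) over the functions marked as needed. This is an illustration of what the log line implies, not the HBase source; the class and variable names are hypothetical.

    // Illustrative sketch only: recomputes the "weighted average imbalance" implied by the
    // functionCost= dump above. NOT HBase code; names here are hypothetical.
    public final class WeightedImbalanceSketch {
      public static void main(String[] args) {
        // (multiplier, imbalance) pairs taken from the functionCost= dump above;
        // functions reported as "(not needed)" are simply omitted.
        double[][] functionCost = {
          {500.0, 0.0}, // RegionCountSkewCostFunction
          {7.0,   0.0}, // MoveCostFunction
          {15.0,  0.0}, // RackLocalityCostFunction
          {35.0,  0.0}, // TableSkewCostFunction
          {5.0,   0.0}, // ReadRequestCostFunction
          {5.0,   0.0}, // WriteRequestCostFunction
          {5.0,   0.0}, // MemStoreSizeCostFunction
          {5.0,   0.0}  // StoreFileCostFunction
        };
        double weighted = 0.0, multipliers = 0.0;
        for (double[] fc : functionCost) {
          weighted += fc[0] * fc[1];
          multipliers += fc[0];
        }
        double weightedAverageImbalance = multipliers == 0.0 ? 0.0 : weighted / multipliers;
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, balance needed=%b%n",
            weightedAverageImbalance, minCostNeedBalance,
            weightedAverageImbalance > minCostNeedBalance);
      }
    }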
2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,757 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
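The BalancerClusterState(202/303/314) records above print the dense indices the balancer assigns before costing: each server name maps to an index (the {srv...=N} map), each distinct host and rack maps to an index as well, and in this single-rack test every server is its own host, so "server N is on host N" and every server lands on rack 0. A small sketch of that mapping, assuming one host per server and a single rack named "rack" as in the dump above; the structure and names are illustrative, not the HBase implementation.

    // Minimal sketch (not HBase code) of the server -> host/rack index mapping printed above.
    import java.util.LinkedHashMap;
    import java.util.Map;

    public final class ClusterStateSketch {
      public static void main(String[] args) {
        // Server names and their indices as listed in the "Hosts are {...}" record above.
        String[] servers = {"srv1111174865", "srv129866321", "srv1322675580", "srv1502645354",
            "srv1600306260", "srv1958667698", "srv290204600", "srv837015462",
            "srv967019451", "srv996791137"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        for (int serverIdx = 0; serverIdx < servers.length; serverIdx++) {
          // One host per server and a single rack named "rack", as in this test cluster.
          int host = hostIndex.computeIfAbsent(servers[serverIdx], k -> hostIndex.size());
          int rack = rackIndex.computeIfAbsent("rack", k -> rackIndex.size());
          System.out.printf("server %d is on host %d, rack %d%n", serverIdx, host, rack);
        }
      }
    }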
2024-11-09T14:15:59,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,759 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
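The skip message repeated for each table points at two knobs: lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0, or raising the multiplier of a specific cost function. A hedged sketch of the kind of override it describes, using org.apache.hadoop.conf.Configuration with arbitrary illustrative values; in a deployment these settings would normally live in hbase-site.xml, and the regionCountCost key shown is assumed from HBase defaults (the 500.0 multiplier above matches it) and should be checked against your version.

    // Hedged example only: the type of tuning the log message above suggests.
    // The minCostNeedBalance key is taken verbatim from the log; values are illustrative.
    import org.apache.hadoop.conf.Configuration;

    public final class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Lower the skip threshold so smaller weighted-average imbalances still produce a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or raise the weight of one cost function (key assumed for RegionCountSkewCostFunction).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }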
2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,761 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
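
Annotation on the BalancerClusterState DEBUG lines above: each server name is assigned a host index and a rack index, and because this test declares only the single default rack ("racks are {rack=0}"), every server ends up on rack 0. The following is an illustrative sketch only, not HBase's BalancerClusterState; the server names and the single rack entry mirror the log, while the indexing scheme itself is an assumption made for illustration.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch (assumption, not HBase code): reproduce the kind of
// host/rack indexing the DEBUG lines above report for a 10-server test cluster.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    List<String> servers = List.of(
        "srv1111174865", "srv129866321", "srv1322675580", "srv1502645354",
        "srv1600306260", "srv1958667698", "srv290204600", "srv837015462",
        "srv967019451", "srv996791137");

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    rackIndex.put("rack", 0); // single default rack, as in the log

    for (int server = 0; server < servers.size(); server++) {
      // One server per host in this test, so server i lands on host i.
      hostIndex.putIfAbsent(servers.get(server), hostIndex.size());
      System.out.println("server " + server + " is on host " + hostIndex.get(servers.get(server)));
      System.out.println("server " + server + " is on rack " + rackIndex.get("rack"));
    }
    System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=" + rackIndex.size());
  }
}
```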
2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,762 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
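
Annotation on the StochasticLoadBalancer INFO lines above: the balancer skips a table when the weighted average imbalance does not exceed the minCostNeedBalance threshold (1.0 in this run). A minimal sketch of that decision follows, under the assumption that the weighted average is the multiplier-weighted mean of the per-cost-function imbalances, with functions reported as "(not needed)" left out; the multipliers and imbalances are taken from the log, but the code is not HBase's implementation.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch (assumption, not HBase's StochasticLoadBalancer): compute a
// multiplier-weighted average of per-cost-function imbalances and compare it
// against the minCostNeedBalance threshold, as the INFO lines describe.
public class NeedsBalanceSketch {
  public static void main(String[] args) {
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    // name -> {multiplier, imbalance}, values copied from the log above
    Map<String, double[]> functions = new LinkedHashMap<>();
    functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    functions.put("MoveCostFunction", new double[] {7.0, 0.0});
    functions.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
    functions.put("TableSkewCostFunction", new double[] {35.0, 0.0});
    functions.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
    functions.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
    functions.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
    functions.put("StoreFileCostFunction", new double[] {5.0, 0.0});

    double weightedSum = 0.0, totalWeight = 0.0;
    for (double[] mi : functions.values()) {
      weightedSum += mi[0] * mi[1];
      totalWeight += mi[0];
    }
    double weightedAverageImbalance = totalWeight == 0 ? 0.0 : weightedSum / totalWeight;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.printf(
          "skipping load balancing because weighted average imbalance=%.1f <= threshold(%.1f)%n",
          weightedAverageImbalance, minCostNeedBalance);
    } else {
      System.out.println("generating a balance plan");
    }
  }
}
```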
2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,764 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
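
Annotation on the tuning hint repeated in the INFO lines above: for more aggressive balancing the log suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or raising individual cost-function multipliers. A hedged sketch of the first option is below, setting the property programmatically through Hadoop's Configuration API; in practice it would normally be placed in hbase-site.xml on the master, and the value 0.05 is an arbitrary illustration rather than a recommendation from the log.

```java
import org.apache.hadoop.conf.Configuration;

// Hedged example: lower the threshold named in the log so smaller imbalances
// trigger a balance plan. Property key comes from the log; 0.05 is only an
// illustrative value, not a tuning recommendation.
public class LowerMinCostNeedBalance {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```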
2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,766 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
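The "skipping load balancing" records repeated throughout this run compare a weighted average of the per-cost-function imbalances against the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here) and skip the table when the average does not exceed it. The Java sketch below only illustrates that comparison, using the multipliers and imbalances printed in the functionCost= list; the class name and the averaging formula are simplified assumptions for illustration, not the actual StochasticLoadBalancer code.

    // Simplified, illustrative sketch of the skip decision reported in the log:
    // skip when sum(multiplier_i * imbalance_i) / sum(multiplier_i) <= minCostNeedBalance.
    public final class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // Multipliers and imbalances taken from the functionCost= entries above.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
            double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

            double weightedSum = 0.0, weightTotal = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weightedSum += multipliers[i] * imbalances[i];
                weightTotal += multipliers[i];
            }
            double weightedAverageImbalance = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

            boolean skipBalancing = weightedAverageImbalance <= minCostNeedBalance;
            // Prints 0.0 and true, matching the "imbalance=0.0 <= threshold(1.0)" lines in the log.
            System.out.printf("weighted average imbalance=%.1f, skip=%b%n",
                weightedAverageImbalance, skipBalancing);
        }
    }

With every reported imbalance at 0.0, any such weighted average is 0.0, which is why every table in this test run is skipped.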
2024-11-09T14:15:59,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,768 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,769 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
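The log message itself names two tuning knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. The sketch below shows the first knob applied programmatically; only the property name is taken from the log, the class name is hypothetical, the 0.05 value is an arbitrary example rather than a recommendation, and in a real deployment the property would normally be set in hbase-site.xml instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch of the tuning suggested by the log message above.
    public final class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Lower the threshold so smaller weighted-average imbalances still trigger balancing.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // The individual cost-function multipliers have their own properties
            // (version-dependent; consult the HBase documentation for the exact keys)
            // and can be raised instead of lowering the threshold.
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }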
2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,771 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
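Note on the "skipping load balancing" records above: the message suggests the balancer takes a weighted average over the cost functions it lists, roughly sum(multiplier_i * imbalance_i) / sum(multiplier_i) over the functions not marked "(not needed)", and skips a table when that average is at or below the minCostNeedBalance threshold. In this run every imbalance is 0.0, so the numerator is 0 whatever the multipliers (500.0, 35.0, 15.0, 7.0, 5.0, ...), the weighted average is 0.0 <= 1.0, and each per-table plan ends in a skip. The exact averaging details are an inference from the message text, not something shown directly in this log.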
2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,773 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,775 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,777 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1111174865=0, srv1958667698=5, srv996791137=9, srv1600306260=4, srv1322675580=2, srv290204600=6, srv129866321=1, srv837015462=7, srv967019451=8, srv1502645354=3} racks are {rack=0} 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-09T14:15:59,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
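Every "skipping load balancing" record in this run reduces to a single comparison: a multiplier-weighted average of the per-cost-function imbalances is checked against the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here). The sketch below reproduces that decision with the numbers from the functionCost= lines, assuming the weighted average is sum(multiplier * imbalance) / sum(multiplier) over the functions that are needed; it is an illustration of the logged check, not the HBase source.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch (not HBase source): reproduce the skip decision that the
// StochasticLoadBalancer records above report for each table.
public class MinCostNeedBalanceSketch {

    // Weighted average of per-function imbalances, weighted by each multiplier
    // (assumed reading of "weighted average imbalance" in the log message).
    static double weightedAverageImbalance(Map<String, double[]> functions) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : functions.values()) {   // mi[0] = multiplier, mi[1] = imbalance
            weightedSum += mi[0] * mi[1];
            multiplierSum += mi[0];
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost= line;
        // functions reported as "(not needed)" are omitted from the average.
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double minCostNeedBalance = 1.0;   // threshold(1.0) in the log
        double imbalance = weightedAverageImbalance(functions);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every reported imbalance at 0.0 the weighted average is 0.0 regardless of the multipliers, so every table in this run is skipped.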
2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
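The repeated BalancerClusterState records describe how the balancer indexes the cluster before costing: the "Hosts are {...}" map assigns each server name a host index, and with one server per host on a single rack, server i ends up on host i and rack 0, which is exactly what the DEBUG/INFO lines then enumerate. A small illustrative sketch using one of the host maps from this log (not the actual HBase data structures):

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the server -> host / rack indexing reported above:
// one server per host and a single rack ("racks are {rack=0}").
public class ClusterStateIndexSketch {
    public static void main(String[] args) {
        // Host map copied from one of the DEBUG records (server name -> host index).
        Map<String, Integer> hosts = new LinkedHashMap<>();
        hosts.put("srv1563088637", 3);
        hosts.put("srv1217667591", 0);
        hosts.put("srv1657264052", 4);
        hosts.put("srv485001835",  7);
        hosts.put("srv1427799185", 2);
        hosts.put("srv1830163515", 5);
        hosts.put("srv2137645512", 6);
        hosts.put("srv1300693155", 1);

        hosts.forEach((name, host) ->
            System.out.println(name + " -> host " + host + ", rack 0"));

        int servers = hosts.size();
        for (int server = 0; server < servers; server++) {
            System.out.println("server " + server + " is on host " + server); // one server per host
            System.out.println("server " + server + " is on rack 0");
        }
        System.out.println("Number of tables=1, number of hosts=" + servers + ", number of racks=1");
    }
}
```

The closing summary line (tables=1, hosts=8, racks=1) is just the sizes of those index sets.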
2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1563088637=3, srv1217667591=0, srv1657264052=4, srv485001835=7, srv1427799185=2, srv1830163515=5, srv2137645512=6, srv1300693155=1} racks are {rack=0} 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1939312391=5, srv1240859687=1, srv1374771100=2, srv258650109=6, srv725881625=7, srv1568383205=3, srv1710619266=4, srv1049650008=0} racks are {rack=0} 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
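[Editor's note] The log message itself suggests the tuning knob: lower hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 (or raise individual cost-function multipliers) for more aggressive balancing. Below is a small, hedged sketch of setting that property programmatically on an HBase Configuration, as one might do in a test; on a real cluster the same key would normally go in hbase-site.xml. The value 0.05 is illustrative, not a recommendation from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "need balance" threshold named in the log so smaller imbalances trigger plans.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}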
2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-09T14:15:59,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-09T14:15:59,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-09T14:15:59,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-09T14:15:59,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-09T14:15:59,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1530305469=2, srv1336872556=1, srv492890663=6, srv1585516850=3, srv1248147355=0, srv522662074=7, srv2083309114=5, srv1754306133=4} racks are {rack=0} 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:15:59,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-09T14:15:59,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-09T14:15:59,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
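Before the cost functions are evaluated, each BalancerClusterState block above assigns every server a host index and a rack index; with eight single-server hosts on a single rack, server i ends up on host i and rack 0. The rough Java sketch below shows that kind of bookkeeping under stated assumptions; ServerTopology and buildIndices are hypothetical names for this example, not BalancerClusterState's real fields or methods.

import java.util.LinkedHashMap;
import java.util.Map;

// Rough sketch of server -> host -> rack index bookkeeping, mirroring the
// "server N is on host N" / "server N is on rack 0" lines above.
final class ServerTopology {
  final int[] serverToHost;
  final int[] serverToRack;

  ServerTopology(int[] serverToHost, int[] serverToRack) {
    this.serverToHost = serverToHost;
    this.serverToRack = serverToRack;
  }

  // Assigns dense indices in first-seen order; every distinct host/rack name gets the next index.
  static ServerTopology buildIndices(String[] servers, Map<String, String> rackOfServer) {
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    int[] toHost = new int[servers.length];
    int[] toRack = new int[servers.length];
    for (int i = 0; i < servers.length; i++) {
      // Each server name doubles as its host name here, so host index == server index.
      toHost[i] = hostIndex.computeIfAbsent(servers[i], h -> hostIndex.size());
      String rack = rackOfServer.getOrDefault(servers[i], "rack");
      toRack[i] = rackIndex.computeIfAbsent(rack, r -> rackIndex.size());
    }
    return new ServerTopology(toHost, toRack);
  }

  public static void main(String[] args) {
    String[] servers = {"srv1043279768", "srv1420401023", "srv1920223428", "srv2093866361",
        "srv245267304", "srv408546236", "srv421523616", "srv766318504"};
    ServerTopology topo = buildIndices(servers, Map.of()); // every server falls back to rack "rack"
    for (int i = 0; i < servers.length; i++) {
      System.out.println("server " + i + " is on host " + topo.serverToHost[i]
          + " and on rack " + topo.serverToRack[i]);
    }
  }
}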
2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-09T14:15:59,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-09T14:15:59,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-09T14:15:59,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:15:59,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
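Each skip message prints a functionCost breakdown: one multiplier and one imbalance per cost function, with some functions marked "(not needed)". A minimal illustration, assuming the reported weighted average is simply the multiplier-weighted mean of the active imbalances (a reading of the log output, not HBase's actual implementation), reproduces the value 0.0 seen above:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs copied from one functionCost entry in the log;
    // functions reported as "(not needed)" are omitted.
    Map<String, double[]> functions = new LinkedHashMap<>();
    functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    functions.put("MoveCostFunction",            new double[] {7.0, 0.0});
    functions.put("RackLocalityCostFunction",    new double[] {15.0, 0.0});
    functions.put("TableSkewCostFunction",       new double[] {35.0, 0.0});
    functions.put("ReadRequestCostFunction",     new double[] {5.0, 0.0});
    functions.put("WriteRequestCostFunction",    new double[] {5.0, 0.0});
    functions.put("MemStoreSizeCostFunction",    new double[] {5.0, 0.0});
    functions.put("StoreFileCostFunction",       new double[] {5.0, 0.0});

    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (double[] mi : functions.values()) {
      weightedSum += mi[0] * mi[1];
      totalWeight += mi[0];
    }
    double weightedAverage = totalWeight == 0 ? 0.0 : weightedSum / totalWeight;

    // With every imbalance at 0.0 this prints 0.0, which is <= the 1.0 threshold,
    // so the balancer skips the table - exactly what the INFO lines report.
    System.out.println("weighted average imbalance = " + weightedAverage);
  }
}
```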
2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:15:59,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:15:59,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-09T14:15:59,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:15:59,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:15:59,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:15:59,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:15:59,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1420401023=1, srv408546236=5, srv2093866361=3, srv245267304=4, srv766318504=7, srv1920223428=2, srv421523616=6, srv1043279768=0} racks are {rack=0} 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-09T14:15:59,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
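The INFO message itself names the two knobs for making the balancer act on such tables: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising individual cost-function multipliers. A minimal, illustrative way to do that programmatically in a test or embedded setup, using the standard Hadoop/HBase Configuration API; the value 0.05 is an arbitrary example, and the multiplier key in the second call is an assumption to verify against the HBase release in use:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the usual hbase-site.xml / built-in defaults.
        Configuration conf = HBaseConfiguration.create();

        // Lower the skip threshold quoted in the log (default 1.0) so smaller
        // imbalances still trigger plan generation. 0.05 is only an example value.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, weight one cost function more heavily. This key is an
        // assumption (commonly associated with RegionCountSkewCostFunction,
        // whose default multiplier of 500 matches the log above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        // The Configuration would then be handed to the master / balancer under test.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```

The same properties can equally be set in hbase-site.xml; the programmatic form is shown only because this log comes from a unit test.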
2024-11-09T14:15:59,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv747138134=4, srv1154399057=0, srv1343799012=2, srv1250443997=1, srv191821133=3} racks are {rack=0} 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv747138134=4, srv1154399057=0, srv1343799012=2, srv1250443997=1, srv191821133=3} racks are {rack=0} 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv747138134=4, srv1154399057=0, srv1343799012=2, srv1250443997=1, srv191821133=3} racks are {rack=0} 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv747138134=4, srv1154399057=0, srv1343799012=2, srv1250443997=1, srv191821133=3} racks are {rack=0} 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv747138134=4, srv1154399057=0, srv1343799012=2, srv1250443997=1, srv191821133=3} racks are {rack=0} 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-09T14:15:59,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-09T14:15:59,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:15:59,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:15:59,935 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:15:59,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:15:59,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:15:59,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:15:59,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:15:59,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:15:59,940 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:15:59,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:15:59,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:15:59,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:15:59,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:15:59,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:15:59,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:15:59,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:15:59,946 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:15:59,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:15:59,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:15:59,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:15:59,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:15:59,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:15:59,951 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:15:59,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:15:59,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:15:59,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
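The StochasticLoadBalancer message above names the two tuning knobs it checks before producing a plan: the hbase.master.balancer.stochastic.minCostNeedBalance threshold and the per-cost-function multipliers (broken down in the functionCost line that follows). Below is a minimal, hypothetical sketch of tightening both programmatically, assuming the standard Hadoop Configuration / HBaseConfiguration APIs; the property name hbase.master.balancer.stochastic.regionCountCost is assumed here to be the multiplier key for RegionCountSkewCostFunction and is not confirmed by this log. In practice these would normally be set in hbase-site.xml rather than in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath, if any).
        Configuration conf = HBaseConfiguration.create();

        // Lower the "needs balance" threshold from its default of 1.0 so smaller
        // weighted-average imbalances are enough to trigger a balance plan.
        // (Property name taken verbatim from the log message above.)
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, weight region-count skew more heavily relative to the other
        // cost functions (the multiplier is reported as 500.0 in the functionCost line).
        // NOTE: this key is an assumption for illustration, not confirmed by this log.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        // Echo the effective threshold so the change is visible when run standalone.
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
      }
    }

Note that for table13 the reported imbalance is already 0.0, so lowering the threshold alone would not force a plan in this run; it only changes when the balancer considers a table worth balancing at all.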
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-09T14:15:59,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:15:59,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:15:59,955 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:15:59,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:15:59,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:15:59,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:15:59,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:15:59,959 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:15:59,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:15:59,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:15:59,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:15:59,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:15:59,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:15:59,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:15:59,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:15:59,965 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:15:59,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:15:59,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:15:59,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:15:59,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:15:59,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:15:59,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:15:59,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:15:59,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
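The skip decision above weighs each cost function's imbalance by its multiplier; the per-function figures appear in the functionCost breakdown that follows. Below is a minimal, self-contained Java sketch of that arithmetic, assuming "weighted average imbalance" means sum(multiplier x imbalance) / sum(multiplier); the class name, the formula, and the sample values are illustrative assumptions taken from the log, not HBase's actual StochasticLoadBalancer implementation.

// Illustrative only: one possible reading of the "weighted average imbalance"
// reported above, using the multipliers/imbalances printed in the functionCost
// line that follows (functions reported as "not needed" are skipped).
public class WeightedImbalanceSketch {

    public static void main(String[] args) {
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            multiplierSum += multipliers[i];
        }
        double weightedAverageImbalance =
            multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        // A balance plan is only generated when this value exceeds the
        // hbase.master.balancer.stochastic.minCostNeedBalance threshold
        // (1.0 in the run above); lowering that property in hbase-site.xml
        // makes balancing more aggressive, as the log message suggests.
        double minCostNeedBalance = 1.0;
        boolean needsBalancing = weightedAverageImbalance > minCostNeedBalance;

        System.out.println("weighted average imbalance = " + weightedAverageImbalance
            + ", threshold = " + minCostNeedBalance
            + ", balance plan generated = " + needsBalancing);
    }
}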
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-09T14:15:59,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:15:59,972 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:15:59,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:15:59,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:15:59,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:15:59,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:15:59,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:15:59,977 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:15:59,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:15:59,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:15:59,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:15:59,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:15:59,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:15:59,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:15:59,982 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:15:59,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:15:59,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:15:59,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:15:59,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:15:59,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:15:59,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:15:59,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:15:59,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:15:59,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
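The message above names hbase.master.balancer.stochastic.minCostNeedBalance as the gate that decides whether the StochasticLoadBalancer produces a plan at all: with the default of 1.0, a weighted average imbalance of 0.0 short-circuits balancing for the table. A minimal sketch, assuming an HBase client/test classpath, of lowering that threshold (or raising one cost-function multiplier) through the standard Configuration API; the multiplier key name is an assumption, not something taken from this log, and should be checked against the HBase balancer documentation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Start from the default HBase configuration (hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();

    // Lower the gate that produced the "skipping load balancing" message above:
    // the balancer only generates moves when the weighted average imbalance
    // exceeds this value (default 1.0 per the log).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative weight of one cost function so it dominates
    // the weighted average. NOTE: this key name is an assumption, not confirmed by
    // the log; verify it in the HBase balancer documentation before relying on it.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

With a lower threshold, the same cluster state would only be skipped when the weighted imbalance genuinely falls below the new value, rather than below 1.0.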
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:15:59,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-09T14:15:59,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:15:59,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:15:59,991 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:15:59,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:15:59,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:15:59,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:15:59,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:15:59,995 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:15:59,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:15:59,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:15:59,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:15:59,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:15:59,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:15:59,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,001 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-09T14:16:00,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,008 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,013 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,018 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,024 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-09T14:16:00,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,026 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
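The StochasticLoadBalancer(421) entry above (for table17) compares a weighted average imbalance against hbase.master.balancer.stochastic.minCostNeedBalance before deciding whether to generate a balance plan. Below is a minimal, self-contained Java sketch of that kind of check, assuming the weighted average is simply sum(multiplier * imbalance) / sum(multiplier) and reusing the values printed in the functionCost= list; the class and method names are hypothetical illustration, not HBase code.

import java.util.LinkedHashMap;
import java.util.Map;

public class MinCostNeedBalanceSketch {

  // Weighted average of per-cost-function imbalances (assumed formula,
  // not taken from the HBase source).
  static double weightedAverageImbalance(Map<String, double[]> costs) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (double[] cost : costs.values()) {
      double multiplier = cost[0];
      double imbalance = cost[1];
      weightedSum += multiplier * imbalance;
      totalWeight += multiplier;
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances as printed in the functionCost= line for table17;
    // the "(not needed)" functions are omitted.
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction", new double[] {7.0, 0.0});
    costs.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
    costs.put("TableSkewCostFunction", new double[] {35.0, 0.0});
    costs.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
    costs.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
    costs.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
    costs.put("StoreFileCostFunction", new double[] {5.0, 0.0});

    // hbase.master.balancer.stochastic.minCostNeedBalance, 1.0 in this run.
    double minCostNeedBalance = 1.0;
    double imbalance = weightedAverageImbalance(costs);
    if (imbalance <= minCostNeedBalance) {
      System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
          imbalance, minCostNeedBalance);
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}

With every imbalance at 0.0 this prints the same skip decision the log shows; raising a multiplier only changes the weighting, so lowering minCostNeedBalance is the knob the log message points at for more aggressive balancing.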
2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,031 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
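The surrounding BalancerClusterState DEBUG lines enumerate the cluster state the balancer works on: each region server from the "Hosts are {...}" map gets an integer index, and each index is mapped to a host index and a rack index (in this test every server is its own host and all 393 servers share rack 0). The following is a small standalone Java sketch of building that kind of index mapping; the class name and inputs are hypothetical and this is not the BalancerClusterState implementation.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Hypothetical inputs: a few server names in the style of the log, and an
    // (empty) rack lookup, so everything falls back to a single default rack.
    List<String> servers = List.of("srv1004156869", "srv1005874688", "srv1013116167");
    Map<String, String> rackOfHost = Map.of();

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    int[] serverToHost = new int[servers.size()];
    int[] serverToRack = new int[servers.size()];

    for (int s = 0; s < servers.size(); s++) {
      String host = servers.get(s);                          // each server is its own host here
      String rack = rackOfHost.getOrDefault(host, "rack");   // single default rack
      serverToHost[s] = hostIndex.computeIfAbsent(host, h -> hostIndex.size());
      serverToRack[s] = rackIndex.computeIfAbsent(rack, r -> rackIndex.size());
      System.out.printf("server %d is on host %d%n", s, serverToHost[s]);
      System.out.printf("server %d is on rack %d%n", s, serverToRack[s]);
    }
    System.out.printf("number of hosts=%d, number of racks=%d%n",
        hostIndex.size(), rackIndex.size());
  }
}

Run on the full 393-server list with an empty rack lookup, a mapping built this way would report one host per server and a single rack, matching the "number of hosts=393, number of racks=1" summary logged earlier.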
2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,036 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
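A few records further down, the StochasticLoadBalancer reports that it skips balancing for table18 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and suggests either lowering that threshold or raising a specific cost multiplier. The sketch below is a hypothetical illustration of what adjusting those keys might look like; in a real cluster they belong in the master's hbase-site.xml rather than a client-side Configuration, and the regionCountCost key name is assumed from the default multipliers echoed in the functionCost listing, not taken from this log.

// Hypothetical tuning sketch; values are illustrative, not from this run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Threshold named in the log message: a table is only balanced when its
        // weighted average imbalance exceeds this value (default 1.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed key for the RegionCountSkewCostFunction multiplier (500.0 in the
        // functionCost listing); raising it lets region-count skew dominate the
        // weighted average as an alternative to lowering the global threshold.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

        System.out.println("minCostNeedBalance="
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}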
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,041 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-09T14:16:00,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,044 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,048 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,051 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,053 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
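The skip decision logged above compares the weighted average imbalance (0.0 here) against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this test run), and only generates a plan when the imbalance exceeds that threshold. As a minimal sketch, and only under the assumption that one actually wants more aggressive balancing than this test exercises, the two knobs the log message names could be set programmatically like this; the 0.05 value is an illustrative assumption, not taken from this log:

    // Illustrative sketch only; property values are assumptions, not from this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Lowering the threshold below the observed imbalance makes the
            // StochasticLoadBalancer act on smaller skews; 1.0 is the value
            // logged as threshold(1.0) above, 0.05 is an assumed replacement.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Alternatively, raise the relative multiplier of a specific cost
            // function; this key is believed to back RegionCountSkewCostFunction,
            // whose multiplier is logged as 500.0 below.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
            System.out.println(conf.getFloat(
                "hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }

The same keys can equally go into hbase-site.xml; the cost-function listing that follows shows which multipliers were in effect for this run.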
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-09T14:16:00,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,061 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,065 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,070 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,071 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
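[Note: the INFO message above names the hbase.master.balancer.stochastic.minCostNeedBalance threshold, and its per-cost-function breakdown (functionCost=...) continues directly below. The following is only a minimal sketch of how that threshold could be lowered programmatically, assuming a standard HBase client Configuration; the property name is taken from the log message itself, while the value 0.05f and the class name BalancerTuningSketch are purely illustrative and not part of this test run.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from the default HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();

            // Lower the threshold mentioned in the log message above so the
            // StochasticLoadBalancer acts on smaller weighted-average imbalances.
            // 0.05f is an illustrative value, not the value used in this test.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // The same property can equally be set in hbase-site.xml on the master;
            // printing it back here just confirms the override took effect.
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

[End of illustrative sketch; the original log output resumes below.]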
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-09T14:16:00,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,078 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,082 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,087 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-09T14:16:00,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
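Aside on the StochasticLoadBalancer record above (table51): the balancer skips the run because the weighted average imbalance (0.0) is at or below the threshold given by hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the functionCost= listing shows each cost function's multiplier and current imbalance. The sketch below recomputes that comparison from the logged figures. It is a minimal illustration only, assuming the "weighted average imbalance" is a multiplier-weighted mean over the cost functions that report an imbalance (entries marked "(not needed)" are omitted); the class, record, and helper names are hypothetical and are not HBase internals.

// Illustrative sketch, not the HBase implementation: recompute the
// multiplier-weighted average imbalance from the functionCost= values in the
// log record above and apply the minCostNeedBalance threshold.
import java.util.List;

public final class WeightedImbalanceSketch {

  /** One multiplier/imbalance pair as printed in the functionCost= listing. */
  record FunctionCost(String name, double multiplier, double imbalance) {}

  /** Multiplier-weighted mean of the per-function imbalances (assumption). */
  static double weightedAverageImbalance(List<FunctionCost> costs) {
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (FunctionCost c : costs) {
      weightedSum += c.multiplier() * c.imbalance();
      multiplierSum += c.multiplier();
    }
    return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
  }

  public static void main(String[] args) {
    // Values copied from the functionCost= entries above; "(not needed)"
    // functions (e.g. PrimaryRegionCountSkewCostFunction) are left out.
    List<FunctionCost> costs = List.of(
        new FunctionCost("RegionCountSkewCostFunction", 500.0, 0.0),
        new FunctionCost("MoveCostFunction", 7.0, 0.0),
        new FunctionCost("RackLocalityCostFunction", 15.0, 0.0),
        new FunctionCost("TableSkewCostFunction", 35.0, 0.0),
        new FunctionCost("ReadRequestCostFunction", 5.0, 0.0),
        new FunctionCost("WriteRequestCostFunction", 5.0, 0.0),
        new FunctionCost("MemStoreSizeCostFunction", 5.0, 0.0),
        new FunctionCost("StoreFileCostFunction", 5.0, 0.0));

    double imbalance = weightedAverageImbalance(costs);
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    if (imbalance <= minCostNeedBalance) {
      System.out.println("skip balancing: weighted average imbalance=" + imbalance
          + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("run balancer: weighted average imbalance=" + imbalance);
    }
  }
}

As the log message itself notes, more aggressive balancing in a real deployment would come from lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 or raising the multiplier of the cost function(s) of interest; the exact weighting inside HBase may differ from this sketch.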
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,094 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,098 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,102 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-09T14:16:00,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,110 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,114 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,119 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,124 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-09T14:16:00,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,126 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
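The table53 message above ("skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)") names hbase.master.balancer.stochastic.minCostNeedBalance as the threshold to lower for more aggressive balancing. The following is only a hypothetical sketch of setting that property programmatically; it is not part of the test output, the 0.05 value is purely illustrative, and in practice the key would normally be placed in hbase-site.xml on the master.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceSketch {
  public static void main(String[] args) {
    // Hypothetical tuning sketch, not taken from the test run logged above.
    Configuration conf = HBaseConfiguration.create();
    // The balancer skipped table53 because weighted average imbalance=0.0 was
    // below the default threshold of 1.0; lowering the threshold makes the
    // StochasticLoadBalancer act on smaller imbalances. 0.05 is only an example.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // The alternative mentioned in the log is raising the multiplier of a
    // specific cost function (e.g. RegionCountSkewCostFunction, which shows
    // multiplier=500.0 in the functionCost breakdown above) via its own property.
  }
}

The same key can equally be set in hbase-site.xml; it only changes when the balancer decides a plan is worth generating, not how regions are assigned once it runs.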
2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,129 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,130 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,132 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,135 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
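For context, the decision quoted above is a weighted-average check: every active cost function in the functionCost breakdown that follows reports imbalance=0.0, so the multiplier-weighted average is 0.0, which does not exceed the 1.0 threshold, and no balance plan is produced for table10. Below is a minimal Java sketch of that arithmetic; it assumes the "weighted average imbalance" is the multiplier-weighted mean of the per-function imbalances, and all names in it are illustrative rather than HBase's internal API.

    // Sketch only: reproduces the threshold check quoted in the log message above.
    // Multipliers and imbalances are copied from the functionCost line that follows
    // (active functions: RegionCountSkew, Move, RackLocality, TableSkew,
    //  ReadRequest, WriteRequest, MemStoreSize, StoreFile).
    public class BalanceThresholdSketch {
        public static void main(String[] args) {
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
            double weightedSum = 0.0, multiplierSum = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weightedSum   += multipliers[i] * imbalances[i];
                multiplierSum += multipliers[i];
            }
            double weightedAvgImbalance = weightedSum / multiplierSum;          // 0.0 for table10 here
            double minCostNeedBalance = 1.0;                                    // threshold quoted in the log
            boolean needsBalancing = weightedAvgImbalance > minCostNeedBalance; // false -> balancer skips the table
            System.out.println("weighted average imbalance=" + weightedAvgImbalance
                    + ", needsBalancing=" + needsBalancing);
        }
    }

Lowering hbase.master.balancer.stochastic.minCostNeedBalance (the property named in the message above) below the observed imbalance, or raising a cost function's multiplier, would tip this same comparison toward generating a plan.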
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-09T14:16:00,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,142 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,146 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,151 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
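[editor's note] The StochasticLoadBalancer message just above names the two tuning knobs itself: lower hbase.master.balancer.stochastic.minCostNeedBalance below the default 1.0, or raise the multiplier of a specific cost function. As a minimal sketch only (not part of this log), here is how that threshold could be set programmatically via the standard HBase Configuration API; in practice it is normally placed in hbase-site.xml on the master, and the class name and the 0.05 value below are arbitrary illustrations, not values taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Sketch: load the usual HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Lower the "need balance" threshold from its default of 1.0 so that smaller
        // weighted-average imbalances can still trigger a balance plan, as the log
        // message above suggests. 0.05 is an example value, not a recommendation.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // The alternative mentioned in the message is to increase the relative
        // multiplier of a specific cost function (e.g. the region-count skew weight,
        // shown as multiplier=500.0 in the functionCost line below); the exact
        // property key for each multiplier should be checked against the HBase
        // documentation for the version in use.
    }
}
```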
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-09T14:16:00,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,158 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,162 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,167 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
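Note: the INFO record ending above explains why no plan is produced for table11: the weighted average imbalance (0.0) does not exceed the threshold taken from hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the functionCost breakdown logged next gives each cost function's multiplier and current imbalance. The fragment below is a minimal, hedged sketch of that check, assuming the "weighted average imbalance" is the multiplier-weighted mean of the per-function imbalances reported in that breakdown; it is not the actual StochasticLoadBalancer code, and the class name is invented.

// Hedged sketch of the skip-balancing decision described in the log record above.
// Assumption (not taken from HBase source): weighted average imbalance is the
// multiplier-weighted mean of the per-cost-function imbalances. Functions the
// log reports as "(not needed)" are omitted.
import java.util.LinkedHashMap;
import java.util.Map;

public final class NeedsBalanceSketch {
  public static void main(String[] args) {
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    // {multiplier, imbalance} pairs as reported in the functionCost line that follows.
    Map<String, double[]> functionCost = new LinkedHashMap<>();
    functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    functionCost.put("MoveCostFunction",            new double[] {7.0,   0.0});
    functionCost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    functionCost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    functionCost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    functionCost.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    functionCost.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    functionCost.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] mi : functionCost.values()) {
      weightedSum += mi[0] * mi[1]; // multiplier * imbalance
      multiplierSum += mi[0];
    }
    double weightedAverageImbalance = multiplierSum == 0 ? 0 : weightedSum / multiplierSum;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing because weighted average imbalance="
          + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("balancing needed");
    }
  }
}

As the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function is what would make this check report that balancing is needed.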
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-09T14:16:00,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,174 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
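[Editor's note] The StochasticLoadBalancer entry above reports that balancing for table11 was skipped because the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and suggests lowering that threshold for more aggressive balancing. A minimal sketch of such an override is shown below, assuming the setting is applied through the Hadoop/HBase Configuration API before the master starts (in practice it would normally live in hbase-site.xml); the 0.05 value is illustrative only and not taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Load the standard HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();
            // Lower the "need balance" threshold so smaller imbalances trigger a balance plan.
            // 0.05 is a hypothetical example value, not a recommendation from the log above.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }
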
2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,179 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,185 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-09T14:16:00,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,193 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,197 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,203 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,207 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-09T14:16:00,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,210 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,215 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,219 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
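The skip decision logged above is, in effect, a weighted average: each cost function reports an imbalance score, its configured multiplier acts as the weight, and a balance plan is only generated when the average exceeds hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here). Below is a minimal, self-contained sketch of that comparison; it assumes the weighted average is sum(multiplier * imbalance) / sum(multiplier) and uses illustrative names (BalanceDecision, weightedAverageImbalance) rather than HBase's actual internals, with the numbers taken from the functionCost= breakdown that follows.

    // Hypothetical sketch of the "skipping load balancing" check seen in the log above.
    // Assumption: weighted average imbalance = sum(multiplier * imbalance) / sum(multiplier),
    // compared against hbase.master.balancer.stochastic.minCostNeedBalance.
    // Class and method names are illustrative, not HBase's real API.
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class BalanceDecision {

        // Weighted average of per-cost-function imbalance; weights are the multipliers.
        static double weightedAverageImbalance(Map<String, double[]> costs) {
            double weightedSum = 0.0;
            double multiplierSum = 0.0;
            for (double[] c : costs.values()) { // c[0] = multiplier, c[1] = imbalance
                weightedSum += c[0] * c[1];
                multiplierSum += c[0];
            }
            return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
        }

        public static void main(String[] args) {
            // Multipliers and imbalances copied from the functionCost= line below;
            // functions marked "(not needed)" in the log are left out of the average.
            Map<String, double[]> costs = new LinkedHashMap<>();
            costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
            costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
            costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
            costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
            costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
            costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
            costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
            costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

            double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
            double imbalance = weightedAverageImbalance(costs);

            if (imbalance <= minCostNeedBalance) {
                System.out.printf(
                    "skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
                    imbalance, minCostNeedBalance);
            } else {
                System.out.println("would generate a balance plan");
            }
        }
    }

With every imbalance at 0.0 the average is 0.0, matching the logged decision to skip table46; lowering minCostNeedBalance or raising a multiplier only changes the outcome once some cost function reports a non-zero imbalance. The per-function multipliers and imbalances that feed this check are listed in the functionCost= breakdown on the next line: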
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-09T14:16:00,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,225 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,229 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,232 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,234 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,238 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
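The skip message above names the knob that controls when the StochasticLoadBalancer produces a plan, and the per-function multipliers it refers to appear in the functionCost breakdown that follows. As a rough illustration only, the sketch below shows how those settings could be tightened programmatically through an HBase Configuration (in a real deployment they would normally be set in hbase-site.xml on the master). The minCostNeedBalance key is taken directly from the log message; the regionCountCost key and all concrete values are assumptions for illustration, not something this test run configures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch, assuming the standard StochasticLoadBalancer property names.
public class BalancerTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "needs balance" threshold mentioned in the log message above
    // (1.0 in this run); smaller values make the balancer act on smaller
    // weighted imbalances. 0.05 is an illustrative value, not a recommendation.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative weight of one cost function, e.g.
    // region count skew (multiplier=500.0 in the functionCost line below).
    // Property name assumed for RegionCountSkewCostFunction's multiplier.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1500f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```

Lowering the threshold makes the balancer react sooner at the cost of more region moves, while raising a single multiplier biases the composite cost toward that one dimension; either change would alter the "skipping load balancing" decision recorded here.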
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-09T14:16:00,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,240 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85
(DEBUG entries, 2024-11-09T14:16:00,241 to 14:16:00,246, [Time-limited test {}] balancer.BalancerClusterState(303): "server N is on host N" logged for each server 86 through 392)
(INFO entries, 2024-11-09T14:16:00,246 to 14:16:00,252, [Time-limited test {}] balancer.BalancerClusterState(314): "server N is on rack 0" logged for each server 0 through 349)
2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,253 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-09T14:16:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
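Aside on the table48 decision logged above: every cost function reports imbalance=0.0, so the multiplier-weighted average imbalance is 0.0, which is below the minCostNeedBalance threshold of 1.0, and the balancer therefore skips generating a plan. The sketch below is a minimal, unofficial illustration of the tuning the log message suggests; it assumes a standard HBase/Hadoop client classpath, and the hbase.master.balancer.stochastic.regionCountCost key is an assumption (not taken from this log) for the RegionCountSkewCostFunction multiplier, so verify it against the HBase version in use. The surrounding output then continues with the balance plan for table49.

// Illustrative sketch only: applying the tuning suggested by the
// StochasticLoadBalancer message above. In practice these properties are
// usually set in hbase-site.xml rather than in code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the threshold named in the log so smaller weighted-average
    // imbalances still trigger a balance plan (the default shown above is 1.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise a cost function's relative multiplier. This key is
    // an assumption for RegionCountSkewCostFunction (multiplier=500.0 in the
    // functionCost listing above); check it for your HBase version.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}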
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,255 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,258 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,263 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,267 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
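[Editor's note] The long runs of "server N is on host N" / "server N is on rack 0" messages above, summarized by "Number of tables=1, number of hosts=393, number of racks=1", describe the index view the balancer builds for a table: every server name is assigned an integer index and mapped to a host index and a rack index (all servers land on rack 0 in this single-rack test topology). The following is only a minimal Java sketch of that kind of mapping, not the actual BalancerClusterState implementation; the server names and indices are sample values copied from the "Hosts are {...}" entries in this log.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustration only: build parallel index arrays like the ones the
// "server N is on host N" / "server N is on rack 0" messages describe.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Hypothetical input, mirroring the "Hosts are {srvX=hostIndex, ...}" log format.
    Map<String, Integer> serverToHostIndex = new LinkedHashMap<>();
    serverToHostIndex.put("srv1850787721", 171);
    serverToHostIndex.put("srv347309721", 272);
    serverToHostIndex.put("srv706960405", 346);

    int numServers = serverToHostIndex.size();
    int[] serverIndexToHostIndex = new int[numServers];
    int[] serverIndexToRackIndex = new int[numServers];

    int serverIndex = 0;
    for (Map.Entry<String, Integer> e : serverToHostIndex.entrySet()) {
      serverIndexToHostIndex[serverIndex] = e.getValue();
      // Single-rack test topology: every host sits on rack 0, as logged above.
      serverIndexToRackIndex[serverIndex] = 0;
      System.out.println("server " + serverIndex + " is on host " + e.getValue());
      serverIndex++;
    }
    for (int i = 0; i < numServers; i++) {
      System.out.println("server " + i + " is on rack " + serverIndexToRackIndex[i]);
    }
  }
}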
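[Editor's note] The StochasticLoadBalancer message above explains why balancing was skipped for table49: the weighted average imbalance (0.0) did not exceed the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0), and it names the two knobs that change this behavior (lowering the threshold, or raising individual cost-function multipliers). The sketch below reproduces that check using the (multiplier, imbalance) pairs from the functionCost breakdown that follows; it assumes the weighted average is simply the multiplier-weighted mean of the per-function imbalances, which may differ in detail from the actual HBase computation.

// Minimal sketch (not the actual StochasticLoadBalancer code) of the
// "needs balance" decision reported in the log entry above.
public class NeedsBalanceSketch {
  public static void main(String[] args) {
    // (multiplier, imbalance) pairs taken from the functionCost line below;
    // cost functions reported as "(not needed)" are simply left out.
    double[][] functionCost = {
      {500.0, 0.0}, // RegionCountSkewCostFunction
      {7.0, 0.0},   // MoveCostFunction
      {15.0, 0.0},  // RackLocalityCostFunction
      {35.0, 0.0},  // TableSkewCostFunction
      {5.0, 0.0},   // ReadRequestCostFunction
      {5.0, 0.0},   // WriteRequestCostFunction
      {5.0, 0.0},   // MemStoreSizeCostFunction
      {5.0, 0.0},   // StoreFileCostFunction
    };

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] mc : functionCost) {
      weightedSum += mc[0] * mc[1];
      multiplierSum += mc[0];
    }
    double weightedAverageImbalance = multiplierSum > 0 ? weightedSum / multiplierSum : 0.0;

    // Threshold from hbase.master.balancer.stochastic.minCostNeedBalance;
    // this test run uses 1.0, so any imbalance <= 1.0 skips balancing.
    // Lowering it (e.g. to 0.05) makes the balancer act on smaller imbalances.
    double minCostNeedBalance = 1.0;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing because weighted average imbalance="
          + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("imbalance above threshold; generate a balance plan");
    }
  }
}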
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-09T14:16:00,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,269 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,272 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,275 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
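The StochasticLoadBalancer message above reports that balancing for table40 was skipped because the weighted average imbalance (0.0) did not exceed the minCostNeedBalance threshold (1.0). As a minimal sketch, assuming hbase-common and hadoop-common are on the classpath, the Java snippet below shows how that threshold could be lowered; the property name and its 1.0 default come from the log message itself, while the example value 0.05 and the class name are illustrative assumptions, not something done in this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch: lower the balancer threshold so plans are generated
// even for small weighted-average imbalances. In a real deployment this key
// would normally be set in hbase-site.xml on the master rather than in code.
public class BalancerThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Default is 1.0 per the log above; lower values make balancing more aggressive.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

The alternative the message mentions, raising the relative multiplier of a specific cost function, corresponds to the per-function multipliers itemized in the functionCost breakdown that follows.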
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-09T14:16:00,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,280 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,283 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,286 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
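The StochasticLoadBalancer entry above skips balancing for table41 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0); the functionCost breakdown that follows lists each cost function's multiplier and current imbalance. As a rough, illustrative sketch of that threshold check (not the actual HBase implementation; the class and variable names below are invented), the multipliers and imbalances from this log entry can be combined like so:

    // Hedged sketch: weighted-average imbalance check, using the multiplier/imbalance
    // pairs printed in the functionCost line below. Illustrative names only.
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class NeedsBalanceSketch {
      public static void main(String[] args) {
        // cost function -> {multiplier, imbalance}, taken from the log entry
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        // hbase.master.balancer.stochastic.minCostNeedBalance, as logged (1.0)
        double minCostNeedBalance = 1.0;

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] c : costs.values()) {
          weightedSum += c[0] * c[1];   // multiplier * imbalance
          multiplierSum += c[0];
        }
        double weightedAverageImbalance =
            multiplierSum == 0 ? 0 : weightedSum / multiplierSum;

        // Mirrors the decision logged above: 0.0 <= 1.0, so balancing is skipped.
        boolean needsBalance = weightedAverageImbalance > minCostNeedBalance;
        System.out.printf("weighted average imbalance=%.1f, needsBalance=%s%n",
            weightedAverageImbalance, needsBalance);
      }
    }

With every imbalance at 0.0 the weighted average is 0.0, so the check stays false and no plan is generated for table41; lowering minCostNeedBalance or raising the multiplier of a specific cost function, as the log message suggests, is how the check would trip sooner.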
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-09T14:16:00,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,290 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,293 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
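These DEBUG entries enumerate the topology index the balancer builds before generating the plan for table42: each of the 393 servers maps to its own host, and every host sits on the single rack 0, matching the earlier "number of hosts=393, number of racks=1" summary. A minimal sketch of such a flat server-to-host-to-rack index (field names here are illustrative, not the real BalancerClusterState internals):

    // Hedged sketch: one host per server, a single rack, as logged in this test run.
    public class ClusterTopologyIndex {
      final int[] serverToHost;
      final int[] hostToRack;

      ClusterTopologyIndex(int numServers) {
        serverToHost = new int[numServers];
        hostToRack = new int[numServers];
        for (int server = 0; server < numServers; server++) {
          serverToHost[server] = server; // "server N is on host N"
          hostToRack[server] = 0;        // "server N is on rack 0"
        }
      }

      public static void main(String[] args) {
        ClusterTopologyIndex idx = new ClusterTopologyIndex(393);
        System.out.println("server 270 is on host " + idx.serverToHost[270]
            + ", rack " + idx.hostToRack[idx.serverToHost[270]]);
      }
    }

With one host per server and a single rack, there is nothing for host- or rack-locality terms to differentiate, which is consistent with RackLocalityCostFunction reporting imbalance=0.0 above.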
2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,296 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-09T14:16:00,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,301 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,303 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,307 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,309 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-09T14:16:00,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,311 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,314 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,317 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,317 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,321 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
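[editor's note] The StochasticLoadBalancer record for table44 above reports that balancing was skipped because the weighted average imbalance (0.0) did not exceed the threshold hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and it suggests either lowering that threshold or raising the multiplier of a specific cost function. The following is a minimal, hypothetical Java sketch of how those two knobs could be set on an HBase Configuration before starting a cluster or test; the 0.05 and 1000 values are purely illustrative, and the regionCountCost key name is an assumption inferred from the RegionCountSkewCostFunction multiplier (500.0) in the log, so verify it against the HBase version in use.

```java
// Hypothetical tuning sketch, not taken from the test run above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static Configuration tunedBalancerConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "skip balancing below this imbalance" threshold
        // (logged as threshold(1.0) above); 0.05 is illustrative only.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Increase the relative weight of region-count skew versus the other
        // cost functions. Key name assumed; check the HBase release in use.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}
```

In a real deployment these properties would normally be set in hbase-site.xml on the master rather than programmatically; the sketch only mirrors the tuning hint printed by the balancer.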
2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,324 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,327 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,332 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,334 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,337 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,340 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-09T14:16:00,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,342 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,344 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,347 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,352 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
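The StochasticLoadBalancer INFO message above explains why no plan was generated for table36: the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the message itself suggests lowering that threshold or raising a cost-function multiplier for more aggressive balancing. The following is a minimal sketch of how that advice could be applied, not part of the test output: it assumes the properties reach the HMaster (normally via hbase-site.xml rather than code), the values 0.05 and 1000 are illustrative assumptions, and the regionCountCost key should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: tune the stochastic balancer so it acts on smaller imbalances.
// In a real cluster these properties belong in hbase-site.xml on the master.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Default is 1.0 (the threshold(1.0) reported in the log above); lowering it
    // makes the balancer generate plans for smaller weighted-average imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the multiplier of a specific cost function, e.g. region
    // count skew (shown as multiplier=500.0 in the functionCost line above).
    // Key name is an assumption to verify for your HBase release.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

Lowering minCostNeedBalance trades more region movement for a tighter balance; the MoveCostFunction multiplier (7.0 in the functionCost line) still weighs against excessive moves when plans are computed.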
2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,354 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,357 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,362 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,364 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,367 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,370 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
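The INFO record above is effectively a how-to: to make the balancer act on smaller imbalances, an operator can lower hbase.master.balancer.stochastic.minCostNeedBalance below its 1.0 default, or raise the multiplier of an individual cost function. The sketch below shows how such settings might be applied through the standard HBase/Hadoop Configuration API; the numeric values (0.05 and 1000) and the standalone main() are illustrative assumptions, not values taken from this test run, and in a real deployment these properties would normally be set in hbase-site.xml on the master.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch: tune the stochastic balancer so it balances more aggressively.
// Property names come from the log message above; the numeric values are
// assumptions chosen only for illustration.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "need balance" threshold from its 1.0 default so that a
    // weighted average imbalance above 0.05 already produces a balance plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the weight of one cost function relative to the
    // others (regionCountCost backs RegionCountSkewCostFunction, whose default
    // multiplier of 500.0 appears in the functionCost line below); verify the
    // property name against the HBase release actually deployed.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```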
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-09T14:16:00,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,371 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,374 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,376 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
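The StochasticLoadBalancer message above points at two tuning knobs when a balance run keeps being skipped: lowering hbase.master.balancer.stochastic.minCostNeedBalance (taken directly from the log) or raising the multiplier of a specific cost function. The following is a minimal sketch of how such overrides could be set programmatically; it assumes the standard org.apache.hadoop.conf.Configuration API, and the multiplier key hbase.master.balancer.stochastic.regionCountCost is an assumed example whose exact name should be verified against the HBase release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the default HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Lower the "needs balance" threshold named in the log above so a smaller
        // weighted average imbalance is enough to trigger a balancing run.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, raise the multiplier of one cost function so its imbalance
        // dominates the weighted average. The property below is an assumed key for
        // RegionCountSkewCostFunction; confirm it for your HBase version.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        // Echo the effective threshold back, defaulting to the 1.0 seen in the log.
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

In a real deployment these properties would normally be placed in hbase-site.xml on the master rather than set in code; the sketch only illustrates which settings the log message is referring to.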
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,381 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,383 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,386 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,389 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-09T14:16:00,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,390 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,392 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,396 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
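A minimal sketch (not part of this log) of lowering the hbase.master.balancer.stochastic.minCostNeedBalance threshold that the message above refers to, assuming the standard HBaseConfiguration API; in a real cluster the property would normally go in hbase-site.xml, the class name is arbitrary, and the 0.02f value is only an illustrative choice:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowerBalanceThreshold {
    public static void main(String[] args) {
        // Build an HBase configuration and lower the threshold that the log
        // message above reports as 1.0, so the StochasticLoadBalancer will
        // consider rebalancing at smaller imbalances. 0.02f is an arbitrary
        // example value, not a recommendation taken from this log.
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.02f);
        System.out.println(
            conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}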
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-09T14:16:00,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,400 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,402 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,405 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
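Note on the balancer decision above: the threshold it refers to is the hbase.master.balancer.stochastic.minCostNeedBalance property, and the per-function weights are the multipliers listed in the functionCost breakdown that follows. A minimal sketch, assuming the standard StochasticLoadBalancer property names (verify the exact keys and defaults against your HBase release), of how a test or tool could make the balancer more aggressive, as the message suggests:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Lower the "needs balance" threshold; this run used 1.0, which
            // suppresses balancing whenever the weighted imbalance is <= 1.0.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Or raise the weight of one cost function, e.g. region count skew
            // (shown with multiplier=500.0 in the functionCost line below).
            // Property name assumed from the usual StochasticLoadBalancer knobs.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            System.out.println(conf.getFloat(
                "hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }

On a live cluster these properties would normally be set in hbase-site.xml on the HMaster rather than programmatically; the snippet only illustrates which knobs the log message is pointing at.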
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-09T14:16:00,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,409 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,411 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,414 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,419 
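[Editor's note] The "skipping load balancing" INFO entry above reports a weighted average imbalance of 0.0 against a threshold of 1.0 (the value of hbase.master.balancer.stochastic.minCostNeedBalance, apparently as configured by this test). The sketch below is a hedged, standalone illustration of that check, reconstructed only from the multipliers and imbalances printed in the functionCost= line; it is not the StochasticLoadBalancer source, and the class and variable names are hypothetical.

```java
// Hypothetical standalone sketch (not HBase source): reproduces the weighted-average
// imbalance check implied by the StochasticLoadBalancer log entry above.
public class MinCostNeedBalanceSketch {
    public static void main(String[] args) {
        // (multiplier, imbalance) pairs copied from the functionCost= log line;
        // cost functions reported as "(not needed)" contribute nothing and are skipped.
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}, // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] c : costs) {
            weightedSum += c[0] * c[1];   // multiplier * imbalance
            multiplierSum += c[0];
        }
        double weightedAverageImbalance =
            multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        // Threshold reported in the log: hbase.master.balancer.stochastic.minCostNeedBalance = 1.0
        double minCostNeedBalance = 1.0;
        boolean skipBalancing = weightedAverageImbalance <= minCostNeedBalance;
        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, skip balancing=%b%n",
            weightedAverageImbalance, minCostNeedBalance, skipBalancing);
        // Prints: weighted average imbalance=0.0, threshold=1.0, skip balancing=true
    }
}
```

As the log message itself suggests, more aggressive balancing would require lowering hbase.master.balancer.stochastic.minCostNeedBalance (for example in hbase-site.xml) or raising the multiplier of whichever cost function matters for the workload; with every reported imbalance at 0.0 here, no setting would produce a plan for this cluster state.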
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,421 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,424 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
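The StochasticLoadBalancer message above names the two knobs that decide whether a plan is generated at all: the overall hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this run, so a weighted average imbalance of 0.0 never triggers balancing) and the per-cost-function multipliers; the 0.0 figure is the multiplier-weighted mean of the per-function imbalances listed in the functionCost breakdown that follows. A minimal sketch of how those settings could be tightened programmatically is given below, assuming the standard Hadoop Configuration API; the multiplier property name for the region-count cost is an assumption quoted from memory and should be checked against the HBase version under test (only minCostNeedBalance appears verbatim in the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        // Returns a Configuration that makes the stochastic balancer more aggressive
        // than the defaults visible in the log above.
        public static Configuration aggressiveBalancerConf() {
            Configuration conf = HBaseConfiguration.create();
            // Lower the "skip balancing" threshold from the 1.0 seen in this run so
            // smaller weighted imbalances still produce a balance plan.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Assumed property name for the RegionCountSkewCostFunction multiplier
            // (500.0 in the functionCost line); raising it makes region-count skew
            // dominate the weighted cost.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            return conf;
        }
    }

In a real deployment these values would normally be set in hbase-site.xml on the master rather than built programmatically; the sketch only illustrates which properties the log message is referring to.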
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-09T14:16:00,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,428 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,430 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,433 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
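The StochasticLoadBalancer message above notes that balancing was skipped because the weighted average imbalance (0.0) did not exceed the threshold of 1.0, and that lowering hbase.master.balancer.stochastic.minCostNeedBalance makes balancing more aggressive. A minimal sketch of setting that property programmatically, assuming only the standard Hadoop/HBase Configuration API; the class name and the 0.05 value are illustrative, not taken from this log, and on a real cluster the property would normally be set in hbase-site.xml on the master:

// Sketch: lower the threshold referenced in the log message above so the
// balancer acts on smaller imbalances. 0.05 is an arbitrary example value.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Default is 1.0 per the message above; lower values trigger balancing sooner.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

The alternative the message mentions, raising the relative multiplier of a specific cost function (for example the RegionCountSkewCostFunction multiplier shown in the functionCost line below), has the same effect of making that cost dominate the weighted average.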
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-09T14:16:00,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,437 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,439 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,442 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-09T14:16:00,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
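[editor's note] The StochasticLoadBalancer record above reports that balancing is skipped while the weighted average imbalance stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (set to 1.0 in this test run), and suggests lowering that threshold or raising individual cost-function multipliers. As a rough illustration only (not part of this test's code), the Java sketch below shows how such settings could be applied to an HBase Configuration; the multiplier key used for RegionCountSkewCostFunction (hbase.master.balancer.stochastic.regionCountCost) is an assumption based on the hbase.master.balancer.stochastic.* naming seen in the log and should be verified against the HBase release in use.

// Hypothetical tuning sketch, not the test's own setup: lower the "need balance"
// threshold and raise the region-count skew multiplier so the balancer acts sooner.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // The log shows imbalance=0.0 <= threshold(1.0), so no plan is generated;
    // a smaller threshold makes the balancer react to smaller imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed property name for the RegionCountSkewCostFunction multiplier
    // (reported as multiplier=500.0 above); raising it weights region-count skew more heavily.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}
[end editor's note]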
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,447 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,449 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,452 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,453 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,456 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
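The StochasticLoadBalancer message logged above for table9 names two tuning knobs: the minCostNeedBalance threshold (1.0 in this run) and the per-cost-function multipliers listed in the functionCost line (e.g. RegionCountSkewCostFunction at 500.0). A minimal hbase-site.xml sketch of how those knobs are usually set is shown below; the property keys are the stock StochasticLoadBalancer names, but the exact keys can differ by HBase version, and the values chosen here are purely illustrative — only the 1.0 threshold and the 500.0 multiplier come from the log itself.

  <property>
    <!-- threshold below which balancing is skipped; the test above logged 1.0 -->
    <name>hbase.master.balancer.stochastic.minCostNeedBalance</name>
    <value>0.05</value>
  </property>
  <property>
    <!-- multiplier behind RegionCountSkewCostFunction (logged as multiplier=500.0) -->
    <name>hbase.master.balancer.stochastic.regionCountCost</name>
    <value>1000</value>
  </property>

Lowering minCostNeedBalance makes the balancer act on smaller weighted-average imbalances, while raising an individual multiplier gives that cost function more weight relative to the others in the functionCost breakdown.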
2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,458 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,461 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
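The BalancerClusterState entries in this run record how each region server is mapped to a dense host index and rack index before any cost is computed; in this test every server is its own host and all hosts sit on a single rack, which is why the numbers line up one-to-one ("server N is on host N", "server N is on rack 0"). A minimal, illustrative sketch of that kind of index assignment is given below. It is not the actual BalancerClusterState implementation; all class names, inputs, and topology in the sketch are invented for illustration.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch only: dense integer indexes for hosts and racks, mirroring
// the "server N is on host M" / "server N is on rack R" entries in this log.
// All names and inputs here are invented; this is not the BalancerClusterState code.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    // hypothetical topology: each server on its own host, every host in one rack
    Map<String, String> serverToHost = Map.of(
        "srv1,60000,1", "host-0",
        "srv2,60000,1", "host-1",
        "srv3,60000,1", "host-2");
    Map<String, String> hostToRack = Map.of(
        "host-0", "rack-0",
        "host-1", "rack-0",
        "host-2", "rack-0");

    List<String> servers = new ArrayList<>(serverToHost.keySet());
    Map<String, Integer> hostIndex = new HashMap<>();
    Map<String, Integer> rackIndex = new HashMap<>();
    int[] serverHost = new int[servers.size()];
    int[] serverRack = new int[servers.size()];

    for (int s = 0; s < servers.size(); s++) {
      String host = serverToHost.get(servers.get(s));
      String rack = hostToRack.get(host);
      Integer h = hostIndex.get(host);
      if (h == null) {            // first time this host is seen:
        h = hostIndex.size();     // hand out the next free host index
        hostIndex.put(host, h);
      }
      Integer r = rackIndex.get(rack);
      if (r == null) {            // same idea for racks
        r = rackIndex.size();
        rackIndex.put(rack, r);
      }
      serverHost[s] = h;
      serverRack[s] = r;
      System.out.println("server " + s + " is on host " + serverHost[s]);
      System.out.println("server " + s + " is on rack " + serverRack[s]);
    }
  }
}

With one host per server and a single rack, the printed mapping reproduces the pattern seen in the surrounding entries: server s on host s, rack 0.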
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,464 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
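The StochasticLoadBalancer entry above skips balancing for table31 because the weighted average imbalance (0.0) does not exceed the threshold of 1.0 taken from hbase.master.balancer.stochastic.minCostNeedBalance; the functionCost breakdown in the next entry lists each cost function's multiplier and its current imbalance. The sketch below is hedged: the exact weighting inside StochasticLoadBalancer is an assumption, and only the property name, the threshold value, and the multiplier/imbalance figures come from this log. It shows the threshold comparison and one way the property could be lowered through the standard Hadoop/HBase Configuration API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: weighted-average imbalance vs. minCostNeedBalance.
// The property name is taken from the log message; the arithmetic is an assumed
// multiplier-weighted average, not necessarily HBase's exact implementation.
public class MinCostNeedBalanceSketch {
  public static void main(String[] args) {
    // multipliers and imbalances as reported in the functionCost entry that follows
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};

    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    double weightedAverageImbalance = totalWeight == 0 ? 0.0 : weighted / totalWeight;

    Configuration conf = HBaseConfiguration.create();
    // the log shows a threshold of 1.0 in this run; lowering it makes balancing more aggressive
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    float threshold = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);

    boolean skipBalancing = weightedAverageImbalance <= threshold;
    System.out.println("weighted average imbalance=" + weightedAverageImbalance
        + " <= threshold(" + threshold + ") -> skip=" + skipBalancing);
  }
}

The same property can equally be set in hbase-site.xml, which is the usual place for a persistent change; the programmatic form above is only meant to make the comparison in the log message explicit.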
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,465 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,468 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,471 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
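The StochasticLoadBalancer message just above names the knob it is checking: with weighted average imbalance 0.0 and threshold 1.0, the table8 plan is skipped, and the suggested remedies are lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost function's multiplier (the functionCost breakdown below lists each multiplier). As a minimal, hedged sketch of how such tuning could be expressed in an HBase Configuration: the minCostNeedBalance key is taken verbatim from the log; the regionCountCost key is assumed to be the multiplier behind RegionCountSkewCostFunction (its default 500 matches the multiplier=500.0 shown below), and the chosen values are illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    // Illustrative sketch, not the test's actual setup.
    Configuration conf = HBaseConfiguration.create();

    // Let the balancer act on smaller imbalances than the 1.0 threshold
    // reported in the log ("minCostNeedBalance"); 0.05 is an arbitrary example.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, weight region-count skew more heavily relative to the other
    // cost functions (the log reports its current multiplier as 500.0).
    // Property name assumed to map to RegionCountSkewCostFunction.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1500f);

    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```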
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,475 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,477 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,480 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
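The StochasticLoadBalancer message above names two tuning knobs for getting a balance plan instead of a skip. As a minimal sketch only (assuming the standard property keys hbase.master.balancer.stochastic.minCostNeedBalance and hbase.master.balancer.stochastic.regionCountCost; the values are illustrative and not taken from this test run), the same adjustment could be expressed against a Hadoop Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Lower the "need balance" threshold so the balancer acts on smaller
            // weighted-average imbalances (the log above shows 1.0 causing the skip).
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Or raise the relative weight of the region-count skew cost function
            // (the functionCost line below reports its multiplier as 500.0).
            // Property key assumed to be the stock HBase one for RegionCountSkewCostFunction.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

In a live cluster these properties would normally sit in the master's hbase-site.xml; the snippet is only meant to make the log message's advice concrete.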
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,485 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,487 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,490 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
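The decision logged just above reduces to a weighted-average check: each enabled cost function contributes multiplier * imbalance, and balancing is skipped while the weighted average stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). Below is a minimal Java sketch of that check under those assumptions, using hypothetical names (CostSnapshot, shouldBalance) rather than the actual org.apache.hadoop.hbase.master.balancer classes; the functionCost breakdown that this log message continues with is shown immediately after the sketch.

    import java.util.List;

    // Hypothetical, simplified illustration of the threshold check reported in the log;
    // not the actual org.apache.hadoop.hbase.master.balancer implementation.
    final class CostSnapshot {
        final String name;
        final double multiplier; // e.g. RegionCountSkewCostFunction multiplier=500.0
        final double imbalance;  // e.g. imbalance=0.0 as reported above

        CostSnapshot(String name, double multiplier, double imbalance) {
            this.name = name;
            this.multiplier = multiplier;
            this.imbalance = imbalance;
        }
    }

    final class BalanceDecision {
        // Returns true only when the weighted average imbalance exceeds the threshold
        // (hbase.master.balancer.stochastic.minCostNeedBalance, 1.0 in this log).
        static boolean shouldBalance(List<CostSnapshot> costs, double minCostNeedBalance) {
            double weightedSum = 0.0;
            double multiplierSum = 0.0;
            for (CostSnapshot c : costs) {
                if (c.multiplier <= 0) {
                    continue; // disabled cost functions ("not needed") contribute nothing
                }
                weightedSum += c.multiplier * c.imbalance;
                multiplierSum += c.multiplier;
            }
            double weightedAverage = multiplierSum == 0 ? 0 : weightedSum / multiplierSum;
            return weightedAverage > minCostNeedBalance;
        }

        public static void main(String[] args) {
            List<CostSnapshot> costs = List.of(
                new CostSnapshot("RegionCountSkewCostFunction", 500.0, 0.0),
                new CostSnapshot("MoveCostFunction", 7.0, 0.0),
                new CostSnapshot("TableSkewCostFunction", 35.0, 0.0));
            // With every imbalance at 0.0 the weighted average is 0.0 <= 1.0,
            // so the balancer skips the table, matching the message above.
            System.out.println(shouldBalance(costs, 1.0)); // prints: false
        }
    }

Lowering minCostNeedBalance or raising an individual multiplier shifts this weighted average relative to the threshold, which is what the log message suggests for more aggressive balancing.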
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-09T14:16:00,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,494 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,497 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,500 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,502 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
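The skip decision above is a threshold test: each cost function reports an imbalance, the balancer folds them into a weighted average, and no plan is generated for the table unless that average exceeds hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The sketch below illustrates that arithmetic using the multipliers from the functionCost breakdown that continues after it; the weighting formula (a multiplier-weighted mean of per-function imbalances) and the data shape are assumptions for illustration, not the actual StochasticLoadBalancer internals.

```java
// Illustrative sketch only: assumes "weighted average imbalance" is the
// multiplier-weighted mean of per-cost-function imbalances, as suggested by
// the (multiplier, imbalance) pairs in the functionCost log entry.
import java.util.LinkedHashMap;
import java.util.Map;

public class NeedsBalanceSketch {
    /** Returns true if a balance plan should be generated. */
    static boolean needsBalance(Map<String, double[]> functions, double minCostNeedBalance) {
        double weighted = 0.0, sumMultiplier = 0.0;
        for (double[] mi : functions.values()) {      // mi[0] = multiplier, mi[1] = imbalance
            weighted += mi[0] * mi[1];
            sumMultiplier += mi[0];
        }
        double weightedAverageImbalance = sumMultiplier == 0 ? 0 : weighted / sumMultiplier;
        System.out.printf("weighted average imbalance=%.4f, threshold=%.1f%n",
            weightedAverageImbalance, minCostNeedBalance);
        return weightedAverageImbalance > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Multipliers taken from the functionCost entry logged for table33; every
        // imbalance there is 0.0, so the table is skipped.
        Map<String, double[]> fc = new LinkedHashMap<>();
        fc.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        fc.put("MoveCostFunction",            new double[] {7.0,   0.0});
        fc.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        fc.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        fc.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        fc.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        fc.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        fc.put("StoreFileCostFunction",       new double[] {5.0,   0.0});
        System.out.println("balance needed: " + needsBalance(fc, 1.0)); // false, matching the log
    }
}
```

With every imbalance at 0.0, any weighting yields 0.0, which is why table33 is skipped regardless of how large the multipliers are.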
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-09T14:16:00,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,504 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
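These DEBUG lines enumerate the server-to-host mapping that BalancerClusterState builds before it costs any moves; together with the earlier rack lines they describe a topology in which every server is its own host and all hosts share rack 0. Below is a minimal sketch of such an index structure, assuming plain int arrays keyed by server index; the field names are illustrative, not the actual HBase ones.

```java
// Simplified model of what the DEBUG lines above enumerate: per-server host
// and rack indices. In this test run, server i maps to host i and rack 0.
public class ClusterTopologySketch {
    final int[] serverIndexToHost;
    final int[] serverIndexToRack;

    ClusterTopologySketch(int numServers) {
        serverIndexToHost = new int[numServers];
        serverIndexToRack = new int[numServers];
        for (int s = 0; s < numServers; s++) {
            serverIndexToHost[s] = s;  // one server per host, as logged ("server N is on host N")
            serverIndexToRack[s] = 0;  // single rack, as logged ("server N is on rack 0")
        }
    }

    public static void main(String[] args) {
        ClusterTopologySketch c = new ClusterTopologySketch(393); // number of hosts from the log
        System.out.println("server 392 is on host " + c.serverIndexToHost[392]
            + ", rack " + c.serverIndexToRack[392]);
    }
}
```

Dense integer indices like this make it cheap to look up a server's host or rack while evaluating many candidate moves, which is the access pattern the per-server log lines reflect.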
2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,506 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,509 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
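Note on the INFO entry above: the balancer skips table6 because the weighted-average imbalance (roughly the multiplier-weighted mean of the per-cost-function imbalances listed in the functionCost breakdown on the next line, all 0.0 in this run) does not exceed the 1.0 threshold. A minimal Java sketch of the first remedy the message suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance through the standard HBase Configuration API, follows; the class name and the 0.05 value are illustrative assumptions, not values taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: lowers the stochastic balancer's trigger threshold so that
// smaller weighted-average imbalances still produce a balance plan.
public class LowerBalancerThreshold {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The property name is taken from the log message above; in this test run
    // the effective threshold is 1.0. The 0.05 here is an illustrative value.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

The other knob the message points at is raising the relative multiplier of a specific cost function (for example the RegionCountSkewCostFunction multiplier of 500.0 listed in the breakdown below), which increases that function's weight in the weighted-average imbalance.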
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-09T14:16:00,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,513 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,516 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,519 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
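The INFO message above points at two tuning knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising an individual cost-function multiplier. Below is a minimal, hedged Java sketch of setting those properties on an HBase Configuration; it is not part of the test run logged here, and the regionCountCost key name is an assumption based on the usual RegionCountSkewCostFunction naming, not something shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from the standard HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();

            // The log reports the default threshold as 1.0; a lower value lets the
            // StochasticLoadBalancer act even when the weighted average imbalance is small.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Alternatively, weight one cost dimension more heavily. Key name assumed here:
            // the functionCost dump shows RegionCountSkewCostFunction with multiplier=500.0.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 600.0f);

            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

With minCostNeedBalance set below the reported imbalance, the balancer would generate a plan for the table instead of skipping it, which is the behavior the message above describes. The functionCost breakdown that follows lists the per-function multipliers and imbalances used in that decision.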
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-09T14:16:00,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,523 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,525 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,528 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,531 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
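The StochasticLoadBalancer record above is the operationally interesting line in this stretch of the log: it reports that balancing for table24 is skipped because the weighted average imbalance (0.0) is at or below the configured threshold (1.0), and it names hbase.master.balancer.stochastic.minCostNeedBalance as the knob to lower for more aggressive balancing. The Java sketch below only illustrates that decision; it is not HBase's implementation. It assumes the reported value is the multiplier-weighted mean of the per-cost-function imbalances listed in the functionCost breakdown that follows; the class name BalanceThresholdSketch and the example value 0.05f are hypothetical, and only the property name and the numbers come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalanceThresholdSketch {

  // Multiplier-weighted mean of per-cost-function imbalances (assumed form of
  // the "weighted average imbalance" reported in the log record above).
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances taken from the functionCost breakdown below;
    // the "(not needed)" cost functions are omitted.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

    double avg = weightedAverageImbalance(multipliers, imbalances);
    double threshold = 1.0; // the threshold(1.0) shown in the log record
    if (avg <= threshold) {
      System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): skip balancing%n",
          avg, threshold);
    }

    // To make the balancer act sooner, lower the threshold the message refers to.
    // 0.05f is an arbitrary illustrative value, not a recommendation.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
  }
}

With every imbalance at 0.0, the weighted mean is 0.0 regardless of the multipliers, which is why raising a multiplier alone would not trigger balancing here; the log's suggestion to lower the threshold is the change that would actually matter in this run.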
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-09T14:16:00,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,532 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,534 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,537 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
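The skip decision logged above compares a multiplier-weighted combination of the per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). As a rough illustration only, and not HBase's actual implementation, the sketch below recomputes such a weighted average imbalance from the values itemized in the functionCost listing that follows; the class name and the weighted-mean combining rule are assumptions made for the example.

```java
// Illustrative sketch only (assumed names and formula, not HBase's actual code):
// recombine the multiplier/imbalance pairs from the functionCost listing and
// compare the result to hbase.master.balancer.stochastic.minCostNeedBalance.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs as reported in the functionCost listing;
    // functions reported as "(not needed)" are omitted.
    Map<String, double[]> costFunctions = new LinkedHashMap<>();
    costFunctions.put("RegionCountSkewCostFunction", new double[] { 500.0, 0.0 });
    costFunctions.put("MoveCostFunction", new double[] { 7.0, 0.0 });
    costFunctions.put("RackLocalityCostFunction", new double[] { 15.0, 0.0 });
    costFunctions.put("TableSkewCostFunction", new double[] { 35.0, 0.0 });
    costFunctions.put("ReadRequestCostFunction", new double[] { 5.0, 0.0 });
    costFunctions.put("WriteRequestCostFunction", new double[] { 5.0, 0.0 });
    costFunctions.put("MemStoreSizeCostFunction", new double[] { 5.0, 0.0 });
    costFunctions.put("StoreFileCostFunction", new double[] { 5.0, 0.0 });

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] mi : costFunctions.values()) {
      weightedSum += mi[0] * mi[1]; // multiplier * imbalance
      multiplierSum += mi[0];
    }
    // Assumed combining rule for this example: weighted mean of the imbalances.
    double weightedAverageImbalance =
        multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing because weighted average imbalance="
          + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("balancing needed: imbalance=" + weightedAverageImbalance);
    }
  }
}
```

With every imbalance reported as 0.0, any such weighted combination is also 0.0, which is consistent with the balancer skipping table25 here and moving straight on to generating a plan for table26 in the records that follow.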
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-09T14:16:00,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,542 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
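The StochasticLoadBalancer INFO entry above (the table25 "skipping load balancing" message) names the two knobs that decide whether a balance plan is produced: hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers listed under functionCost. The following is only a minimal, hedged sketch of how such settings could be assembled in a Hadoop Configuration; it is not taken from this test, and the regionCountCost key is an assumed name for the RegionCountSkewCostFunction multiplier rather than something confirmed by this log.

import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Lower the threshold quoted in the log (default 1.0) so smaller
        // weighted-average imbalances still trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed property name for the RegionCountSkewCostFunction multiplier
        // (reported as multiplier=500.0 in the functionCost line above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance = " + conf.getFloat(
            "hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

Wiring this Configuration into a running master (or into the balancer used by the test) is out of scope for this sketch; it only shows which properties the log message is referring to.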
2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,544 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,547 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,550 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
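The StochasticLoadBalancer entry above (its functionCost breakdown continues below) reports that balancing for table26 was skipped because the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and it suggests lowering that threshold or raising the multiplier of a specific cost function for more aggressive balancing. A minimal sketch of how such a tuning might be applied through the HBase configuration API follows; the value 0.05 and the commented-out multiplier key are illustrative assumptions, not settings taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the standard HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Lower the "is balancing needed at all" threshold named in the log message.
        // The default is 1.0; 0.05f here is purely illustrative.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, raise the multiplier of one cost function. The exact property
        // key is assumed here (it is not shown in the log); the defaults visible above
        // (e.g. RegionCountSkewCostFunction multiplier=500.0) are set by such keys.
        // conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

In a live deployment these keys would typically be set in hbase-site.xml on the HMaster rather than programmatically, since the balancer runs inside the master process.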
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-09T14:16:00,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,552 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,555 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,558 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,561 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
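[Editor's note] The INFO entry above shows the StochasticLoadBalancer declining to generate a plan for table27 because the weighted average imbalance (0.0) does not exceed the minCostNeedBalance threshold logged as 1.0; the functionCost breakdown that follows immediately below lists the per-cost-function multipliers and imbalances behind that decision. As a minimal, hypothetical sketch (not part of this log, and normally the property would simply be set in hbase-site.xml on the master), the Java snippet below shows one way the threshold named in the message could be lowered on an HBase Configuration object. The 0.05 value is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
        public static void main(String[] args) {
            // Start from the standard HBase configuration (hbase-site.xml / hbase-default.xml).
            Configuration conf = HBaseConfiguration.create();

            // The log reports threshold(1.0); a smaller value lets smaller imbalances
            // trigger a balance plan. 0.05 is only an illustrative choice.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Confirm the override took effect.
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

The alternative the message mentions, raising the relative multiplier of a specific cost function, works the same way: set the corresponding balancer property to a larger value than the multiplier shown in the functionCost line below (the exact property keys are not given in this log).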
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-09T14:16:00,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,563 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,566 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,569 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
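A note on the decision above: the StochasticLoadBalancer reports that it skipped balancing table28 because the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run), and the message itself points at two knobs, lowering that threshold or raising the multiplier of a specific cost function. As a minimal sketch of the first option only, assuming an illustrative value of 0.05f and that the standard HBase/Hadoop client classes are on the classpath (neither detail comes from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceSketch {
  public static void main(String[] args) {
    // Start from the usual HBase configuration (picks up hbase-site.xml if present).
    Configuration conf = HBaseConfiguration.create();
    // Property name taken verbatim from the StochasticLoadBalancer message above;
    // 0.05f is a hypothetical, more aggressive threshold, not a recommended default.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

In a real deployment the same property would normally be set in hbase-site.xml on the master rather than in code; the cost-function breakdown this decision was based on follows in the functionCost line below.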
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-09T14:16:00,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,574 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,577 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,580 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-09T14:16:00,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
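[Note] The skip message above describes the balancer's gate: it compares a multiplier-weighted average of the per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (threshold 1.0 in this run) and only generates a plan when that average exceeds the threshold. Below is a minimal, hypothetical Java sketch of that comparison, plus how one might lower the threshold through the standard HBase Configuration API. The weightedAverageImbalance helper and the sample numbers are illustrative assumptions drawn from the functionCost line above, not the actual StochasticLoadBalancer code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalanceGateSketch {

  // Hypothetical helper: multiplier-weighted average of per-cost-function
  // imbalances, mirroring the "weighted average imbalance" wording in the
  // log line above. Not the real balancer implementation.
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      weightTotal += multipliers[i];
    }
    return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
  }

  public static void main(String[] args) {
    // Multipliers taken from the functionCost line above; all imbalances are 0.0 there.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

    Configuration conf = HBaseConfiguration.create();
    // Fallback of 1.0 matches the threshold reported in this test run.
    double threshold =
        conf.getDouble("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0);

    double avg = weightedAverageImbalance(multipliers, imbalances);
    if (avg <= threshold) {
      System.out.println("skipping load balancing: " + avg + " <= " + threshold);
    }

    // To balance more aggressively, lower the threshold (here or in hbase-site.xml).
    conf.setDouble("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05);
  }
}
```

As the log message suggests, the alternative to lowering the threshold is raising the multiplier of the specific cost function(s) you care about, which increases their weight in the same average.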
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,585 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,588 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,591 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,595 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-09T14:16:00,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
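The message above names the two knobs that control how eagerly the StochasticLoadBalancer acts (its per-cost-function breakdown follows in the next record): the hbase.master.balancer.stochastic.minCostNeedBalance threshold and the relative multipliers of the individual cost functions. As a minimal sketch only, the Java snippet below sets both on an HBase Configuration. The minCostNeedBalance key is quoted directly from the log; the regionCountCost key is an assumed name inferred from the RegionCountSkewCostFunction multiplier of 500.0 shown below, and in a real deployment these values would normally be set in hbase-site.xml rather than in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from the standard HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();

            // Lower the "need balance" threshold; this test run uses 1.0, so any
            // weighted imbalance <= 1.0 is skipped, as the log entry above shows.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Alternatively, raise a specific cost function's weight. The key below is
            // an assumed name for the RegionCountSkewCostFunction multiplier (default 500.0).
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

            System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

Either change moves the trade-off in the same direction: a lower threshold or a larger multiplier lets the weighted average imbalance exceed minCostNeedBalance sooner, so the balancer generates a plan for the table instead of skipping it.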
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-09T14:16:00,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,597 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,599 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,603 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,603 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
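[Editor's note] The long runs of "server N is on host N" and "server N is on rack 0" records in this output are BalancerClusterState reporting the indexing it builds before a balance plan is generated: each server is assigned a host index and a rack index, and in this test every server sits on its own host while all 393 servers share rack 0 (see the later summary "Number of tables=1, number of hosts=393, number of racks=1"). The sketch below is only an illustration of that indexing under hypothetical names; it is not the HBase BalancerClusterState implementation.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch (not HBase's BalancerClusterState): build the server -> host and
// server -> rack indexes that the "server N is on host N" / "server N is on rack 0"
// records above describe for this test (one server per host, a single rack).
public class ClusterIndexSketch {
  public static void main(String[] args) {
    int numServers = 393;                       // matches "number of hosts=393" in the log
    int[] serverToHost = new int[numServers];
    int[] serverToRack = new int[numServers];
    Map<Integer, List<Integer>> hostToServers = new LinkedHashMap<>();

    for (int server = 0; server < numServers; server++) {
      serverToHost[server] = server;            // "server N is on host N"
      serverToRack[server] = 0;                 // "server N is on rack 0"
      hostToServers.computeIfAbsent(server, h -> new ArrayList<>()).add(server);
    }

    // Mirrors the DEBUG summary line that follows these records in the log.
    System.out.println("Number of tables=1, number of hosts=" + hostToServers.size()
        + ", number of racks=1");
  }
}
```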
2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
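[Editor's note] A few records further on, StochasticLoadBalancer reports that it is "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" and lists each cost function with its multiplier and imbalance. The decision it describes is a weighted average of per-function imbalances compared against hbase.master.balancer.stochastic.minCostNeedBalance. The sketch below only illustrates that check, assuming the average is weighted by each function's multiplier and using the values from the functionCost= line; it is not the actual StochasticLoadBalancer code.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch (hypothetical, not HBase's StochasticLoadBalancer): combine the
// per-cost-function imbalances reported in the log into a weighted average and compare
// it with the minCostNeedBalance threshold (1.0 per the log message).
public class NeedsBalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs taken from the functionCost= line below;
    // functions marked "(not needed)" are skipped here.
    Map<String, double[]> functionCost = new LinkedHashMap<>();
    functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    functionCost.put("MoveCostFunction",            new double[] {7.0,   0.0});
    functionCost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    functionCost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    functionCost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    functionCost.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    functionCost.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    functionCost.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] mi : functionCost.values()) {
      weightedSum += mi[0] * mi[1];  // multiplier * imbalance
      multiplierSum += mi[0];
    }
    double weightedAverageImbalance =
        multiplierSum == 0 ? 0.0 : weightedSum / multiplierSum;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: weighted average imbalance="
          + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("cluster needs balancing");
    }
  }
}
```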
2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,608 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
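[editor's note] The functionCost= line at the start of this balance-plan run lists each cost function with its multiplier and current imbalance, with replica-related functions marked "(not needed)" because they do not apply to this cluster. As a rough illustration of how such a weighted configuration can be combined into a single score, the following sketch (plain Java, not the HBase source; class and variable names are invented for illustration) sums multiplier * imbalance over the applicable functions and normalizes by the total weight. With every imbalance logged as 0.0, the combined score is 0.0, consistent with the balancer having nothing to improve for table22.

import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedCostSketch {
    public static void main(String[] args) {
        // function name -> { multiplier, current imbalance } as reported in the log
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double weighted = 0.0;
        double totalWeight = 0.0;
        for (double[] mc : functions.values()) {
            weighted += mc[0] * mc[1];  // multiplier * imbalance
            totalWeight += mc[0];
        }
        // Normalized score of 0.0 matches the imbalance=0.0 values logged above.
        System.out.printf("weighted=%.1f normalized=%.4f%n",
            weighted, totalWeight == 0 ? 0.0 : weighted / totalWeight);
    }
}

[end editor's note; log continues]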
2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,610 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
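[editor's note] The BalancerClusterState entries above and below enumerate the test topology: 393 region servers (indices 0..392 from the hosts map), each on its own host, all placed on the single rack "rack". A minimal sketch of reading that mapping as index arrays follows; it is an illustration only (hypothetical class and field names, not the BalancerClusterState implementation).

public class ClusterTopologySketch {
    public static void main(String[] args) {
        int numServers = 393;              // srv...=0 through srv...=392 in the hosts map
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];
        for (int server = 0; server < numServers; server++) {
            serverToHost[server] = server; // "server N is on host N": one server per host
            serverToRack[server] = 0;      // "racks are {rack=0}": a single rack
        }
        System.out.println("server 270 -> host " + serverToHost[270]
            + ", rack " + serverToRack[270]);
    }
}

[end editor's note; log continues]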
2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,613 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,614 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
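
[Editor's note, not part of the captured log] The StochasticLoadBalancer record just above names the two knobs it checks before emitting a plan: the global threshold hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers. As a minimal, hypothetical sketch of how those settings could be adjusted programmatically on an HBase Configuration — the value 0.05 and the regionCountCost override below are illustrative assumptions, not values taken from this run (the run shows threshold 1.0 and multiplier 500.0):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the minimum weighted imbalance required before the balancer produces a plan.
    // The log above shows the test running with threshold 1.0; 0.05 is only an example.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative weight of a specific cost function, e.g. region
    // count skew (the log shows its multiplier at the default 500.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```

Either change makes the balancer more willing to generate moves; the same keys can equally be set in hbase-site.xml rather than in code.
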
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-09T14:16:00,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1850787721=171, srv347309721=272, srv706960405=346, srv598915708=324, srv1039058695=7, srv482952567=301, srv787558084=362, srv1224953959=41, srv1239810211=46, srv2140209555=237, srv1338363300=72, srv1385694918=85, srv526418301=311, srv688228260=343, srv1683381103=146, srv1256851977=55, srv2051347961=219, srv1859552650=174, srv403438028=283, srv1449415758=101, srv2147084651=239, srv1650353465=138, srv772131646=359, srv1851491485=172, srv160679914=133, srv331454536=269, srv711257429=348, srv1376764234=80, srv1296458439=66, srv1145233281=26, srv1292477704=65, srv214596539=238, srv1997560602=205, srv1908842840=190, srv2024649063=211, srv2130474389=235, srv236624859=245, srv1706284313=151, srv92520504=384, srv827150895=365, srv1593493996=131, srv1148706593=28, srv389271595=282, srv1223587051=40, srv186572965=177, srv2031098247=215, srv1147897387=27, srv312291252=261, srv1065067971=12, srv1875481563=182, srv1387552420=86, srv2101491444=227, srv737164482=349, srv38483285=280, srv1660660501=142, srv1474897030=106, srv408109233=285, srv1106710732=20, srv348668326=274, srv1493655496=112, srv124531787=49, srv209374602=224, srv1867516774=179, srv50759351=305, srv61409328=328, srv1243688652=47, srv746284479=351, srv762909321=356, srv1460939107=103, srv1540517091=123, srv502681362=303, srv263384773=247, srv583999833=321, srv1478005080=108, srv418153300=290, srv1793073383=165, srv472251805=300, srv1243743184=48, srv611147594=326, srv1098766160=19, srv1907566057=189, srv1998607271=206, srv763146079=357, srv421462110=291, srv42932528=293, srv893812751=378, srv1979750969=201, srv854420556=371, srv2112532366=230, srv1108506616=21, srv1605116268=132, srv1090323661=17, srv264088506=248, srv284121550=255, srv865284691=374, srv219349298=240, srv831289930=367, srv1033421811=5, srv1309298908=69, srv522105089=310, srv1418640626=91, srv1341305227=75, srv1533772135=121, srv1519672607=116, srv228729303=243, srv1742681451=157, srv1402008145=87, srv808376027=363, srv422007901=292, srv519776592=309, srv225339393=241, srv1545611240=125, srv917288445=382, srv105814422=8, srv546161986=314, srv500054329=302, srv857053387=372, srv1412696432=89, srv2030307561=213, srv1898591561=188, srv684213949=342, srv1235797043=45, srv356830413=275, srv1190166906=33, srv1063373100=11, srv1004156869=0, srv1257507544=56, srv897874256=380, srv651995466=335, srv1246238010=50, srv1955895669=194, srv132162234=71, srv266056896=249, srv1274806563=59, srv170361194=149, srv1775930151=163, srv678836390=341, srv14382241=99, srv1013116167=2, srv666819440=337, srv2030679363=214, srv1730831885=154, srv605638408=325, srv1155485715=30, srv1253029582=53, srv348027542=273, srv650023670=334, srv1284247714=63, 
srv1566113266=127, srv2102069254=228, srv678152572=340, srv1225527895=42, srv128132652=61, srv890784448=377, srv1283551638=62, srv375636579=278, srv1090837712=18, srv327193751=266, srv943201068=387, srv1762588955=162, srv174306234=158, srv1523173710=117, srv1983807122=202, srv1653247108=139, srv518846197=308, srv228008728=242, srv129742197=67, srv1472921964=104, srv1717937734=153, srv737457712=350, srv1005874688=1, srv83923605=368, srv311267796=260, srv1961726907=195, srv107946292=15, srv1582358402=130, srv1991635858=204, srv404793200=284, srv1181323192=32, srv510261638=307, srv1340912777=74, srv462935524=299, srv845828585=370, srv613098973=327, srv1703812772=150, srv330117926=268, srv539436025=313, srv1112328360=22, srv1291044379=64, srv1436631254=98, srv1659268696=141, srv509665212=306, srv41195850=288, srv771230758=358, srv1249626650=52, srv1976004021=200, srv762779945=355, srv1079514802=16, srv1473078979=105, srv1708230910=152, srv277331456=253, srv566874609=318, srv1539537175=122, srv505865915=304, srv1264084127=57, srv1984381783=203, srv346968094=271, srv703104979=345, srv990708273=392, srv1246812516=51, srv320894424=264, srv1117661542=23, srv245656417=246, srv1298665065=68, srv1071461125=13, srv1266120968=58, srv289494831=256, srv1368488736=78, srv1799647997=167, srv1610209781=134, srv1817721984=169, srv440879313=294, srv571130093=319, srv1310739989=70, srv2020655851=210, srv1123006139=24, srv1197080925=35, srv197042500=198, srv1027800396=3, srv561058643=316, srv1969180325=197, srv232309473=244, srv1441220010=100, srv1868800492=180, srv2009788090=207, srv2011078753=208, srv269975298=250, srv1540912813=124, srv1578697578=128, srv829633768=366, srv786185552=360, srv1425954373=96, srv1477884402=107, srv1525673133=118, srv1635086051=136, srv973630415=389, srv1736913663=155, srv1178465219=31, srv1383894842=84, srv9659215=388, srv561851701=317, srv632031620=331, srv2055578717=221, srv1777295096=164, srv41156888=287, srv1581687704=129, srv1671493272=144, srv1533458751=120, srv1880586143=184, srv2118562802=232, srv597558872=323, srv533324082=312, srv2105956152=229, srv1153134006=29, srv324086784=265, srv845690324=369, srv1433320198=97, srv2014046787=209, srv1563095745=126, srv454978651=297, srv1280072026=60, srv2040132845=218, srv1377926572=81, srv1058591444=9, srv553619115=315, srv1256251851=54, srv87195799=376, srv974275089=390, srv1420439498=93, srv1865331418=176, srv1483069150=109, srv858473759=373, srv1650118001=137, srv2129676337=234, srv1666060000=143, srv413447639=289, srv2036633356=217, srv1235320623=44, srv289906961=257, srv281532335=254, srv1878131162=183, srv930111460=385, srv1892904602=186, srv1867072094=178, srv1529956389=119, srv1689256628=147, srv1853392391=173, srv1896281634=187, srv186062774=175, srv460964492=298, srv921119783=383, srv357039627=276, srv44902345=296, srv1214901416=38, srv381176803=279, srv1372614123=79, srv315980982=263, srv1490259978=110, srv1059837841=10, srv2095509560=226, srv1869078984=181, srv2055283626=220, srv710750833=347, srv787276953=361, srv62408697=330, srv1221381604=39, srv866895261=375, srv306171712=259, srv895556560=379, srv1923634802=191, srv1680304282=145, srv271187838=251, srv614351533=329, srv1125275973=25, srv644961672=333, srv1519077174=115, srv387349446=281, srv103387309=6, srv1976000762=199, srv1074560093=14, srv333999777=270, srv1353747578=76, srv142029519=92, srv1407726021=88, srv1380709668=83, srv2094192754=225, srv983105009=391, srv1491752497=111, srv1514671645=114, srv2036169396=216, srv162121641=135, srv662645713=336, 
srv756601921=352, srv910992382=381, srv758318969=354, srv1889699120=185, srv2128817459=233, srv149420360=113, srv211822814=231, srv1699713654=148, srv1808873591=168, srv757521932=353, srv444196662=295, srv639322864=332, srv702428950=344, srv811665280=364, srv1657498642=140, srv367470423=277, srv1028155606=4, srv677776300=339, srv2130831882=236, srv205887234=222, srv2064328446=223, srv313644450=262, srv1355867623=77, srv1955738540=193, srv576961569=320, srv1421632326=94, srv592415856=322, srv1379356492=82, srv1758522387=161, srv1415095975=90, srv330037319=267, srv1753004523=159, srv290914087=258, srv1231660548=43, srv275129781=252, srv409749149=286, srv134067849=73, srv670871358=338, srv94040724=386, srv2027428368=212, srv1192330312=34, srv142392938=95, srv1460880743=102, srv1198701567=36, srv1757069515=160, srv1796360162=166, srv1740728649=156, srv1954901149=192, srv1963129011=196, srv1833301412=170, srv121293904=37} racks are {rack=0} 2024-11-09T14:16:00,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:00,619 
DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:00,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:00,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:00,621 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:00,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:00,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:00,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:00,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:00,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:00,625 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:00,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:00,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:00,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-09T14:16:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-09T14:16:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-09T14:16:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-09T14:16:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-09T14:16:00,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-09T14:16:00,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-09T14:16:00,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0 2024-11-09T14:16:00,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-09T14:16:00,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
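Note: the "weighted average imbalance" figures in these records line up with a plain weighted mean over the listed cost functions, sum(multiplier_i * imbalance_i) / 577.0, where 577.0 is the "sum of multiplier of cost functions" reported in the config record at 14:16:00,629 (500 + 35 + 15 + 7 + 4 x 5). For the table23 record above every listed imbalance is 0.0, so the weighted average is 0.0 and balancing is skipped against the threshold(1.0). The sketch below only reproduces that arithmetic as read from this log; it is not HBase's implementation and the class/method names are made up.

    // Illustrative only: recompute the "weighted average imbalance" the way the
    // numbers in this log work out. Not HBase's code; names are made up.
    public final class WeightedImbalanceCheck {
        static double weightedImbalance(double[] multipliers, double[] imbalances) {
            double weighted = 0.0;
            double total = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weighted += multipliers[i] * imbalances[i];
                total += multipliers[i];
            }
            return total == 0.0 ? 0.0 : weighted / total;
        }

        public static void main(String[] args) {
            // Matches the run that starts at 14:16:00,630 below: only
            // RegionCountSkew (multiplier 500.0) is imbalanced, at 1.0.
            double[] multipliers = {500, 7, 15, 35, 5, 5, 5, 5};
            double[] imbalances  = {1.0, 0, 0, 0, 0, 0, 0, 0};
            System.out.println(weightedImbalance(multipliers, imbalances)); // 500/577 ≈ 0.8665511265164645
        }
    }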
2024-11-09T14:16:00,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1570560480=0, srv1618560941=1} racks are {rack=0} 2024-11-09T14:16:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-09T14:16:00,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=3200 2024-11-09T14:16:00,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 31 ms to try 3200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1683737754=0, srv606678524=1} racks are {rack=0} 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
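Note: the plan finished at 14:16:00,662 above checks out against that weighting. The initial 0.8665511265164645 is RegionCountSkew alone at imbalance 1.0 (500 / 577), and the final 0.006065857885615251 is MoveCost alone at imbalance 0.5 (7 x 0.5 / 577 = 3.5 / 577), i.e. one of the cluster's two regions moved onto the idle server. computedMaxSteps=3200 fits the same picture: 2 regions x 800 stepsPerRegion x 2 servers.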
2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1208670546=0, srv2010496000=1} racks are {rack=0} 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-09T14:16:00,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv913465220=1, srv1915275247=0} racks are {rack=0} 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=2, number of racks=1 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1046990857=0, srv1579092697=1} racks are {rack=0} 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=2, number of racks=1 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.2888503755054882 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1277392239=1, srv1036482309=0} racks are {rack=0} 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv414584776=1, srv1175591933=0} racks are {rack=0} 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1400012351=0, srv455331919=1} racks are {rack=0} 2024-11-09T14:16:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=2, number of racks=1 2024-11-09T14:16:00,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.6932409012131716 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.8); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
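Note: the StochasticLoadBalancer(421) records above skip balancing because the weighted average imbalance stays at or below threshold(1.0), i.e. hbase.master.balancer.stochastic.minCostNeedBalance as named in the message (set to 1.0 in this run). Lowering that threshold is an ordinary configuration change, normally made in hbase-site.xml on the master; the fragment below is only an illustrative sketch using the standard Hadoop Configuration API, and the 0.05f value is an arbitrary example rather than anything taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LowerMinCostNeedBalance {
        public static void main(String[] args) {
            // Arbitrary example value; the property name is the one quoted in the
            // log messages above.
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }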
2024-11-09T14:16:00,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1158513680=0, srv1743890932=1} racks are {rack=0} 2024-11-09T14:16:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1432, number of hosts=2, number of racks=1 2024-11-09T14:16:00,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.40878413881917497 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4717368961973279); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv220789019=1, srv1971832290=0} racks are {rack=0} 2024-11-09T14:16:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=53, number of hosts=2, number of racks=1 2024-11-09T14:16:00,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.034662045060658585 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.04000000000000001); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv303113617=2, srv1570968191=0, srv2141105752=1} racks are {rack=0} 2024-11-09T14:16:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-09T14:16:00,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608543, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-09T14:16:00,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. 
Computation took 29 ms to try 7200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.5003035261608543 to a new imbalance of 0.004043905257076833. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1605915366=0, srv2071647694=2, srv1883858552=1} racks are {rack=0} 2024-11-09T14:16:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-09T14:16:00,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:00,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.25015176308042714 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
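Note: the computedMaxSteps values in these runs (3200, 7200, 9600, ...) are consistent with numRegions x stepsPerRegion x numServers, with stepsPerRegion=800 from the config record at 14:16:00,629 and a cap at maxSteps=1000000 since runMaxSteps=false. That is a reading of the numbers in this log rather than a statement of the implementation; the sketch below just restates the arithmetic with made-up names.

    // Illustrative arithmetic only, using the values logged for this run
    // (stepsPerRegion=800, maxSteps=1000000, runMaxSteps=false). Not HBase's code.
    public final class ComputedMaxStepsCheck {
        static long computedMaxSteps(long numRegions, long numServers,
                                     long stepsPerRegion, long maxSteps) {
            return Math.min(maxSteps, numRegions * stepsPerRegion * numServers);
        }

        public static void main(String[] args) {
            // Prints 7200, matching the 3-server run that finished at
            // 14:16:00,802 above (it moved 1 of its 3 regions).
            System.out.println(computedMaxSteps(3, 3, 800, 1_000_000));
        }
    }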
2024-11-09T14:16:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1214015125=1, srv636538767=2, srv1131531013=0} racks are {rack=0} 2024-11-09T14:16:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-09T14:16:00,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2888503755054882, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-09T14:16:00,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 25 ms to try 9600 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2888503755054882 to a new imbalance of 0.0030329289428076256. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.25); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1787297352=0, srv693514608=2, srv245832933=1} racks are {rack=0} 2024-11-09T14:16:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-09T14:16:00,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-09T14:16:00,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 17 ms to try 7200 different iterations. 
Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1923904771=1, srv721751457=2, srv1489159628=0} racks are {rack=0} 2024-11-09T14:16:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=3, number of racks=1 2024-11-09T14:16:00,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-09T14:16:00,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 23 ms to try 9600 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
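Note: in the "Found a solution" records, the residual MoveCostFunction imbalance tracks the fraction of regions the plan moves: 0.5 when 1 of 2 regions moves (14:16:00,662), 0.3333... when 1 of 3 (14:16:00,802), 0.6666... when 2 of 3 (14:16:00,846), and 0.5 when 2 of 4 (14:16:00,870). That is why plans that relocate more regions settle on a slightly larger final weighted imbalance even though RegionCountSkew drops to 0.0; for example the 2-of-3 plan above gives 7 x (2/3) / 577 ≈ 0.008087810514153667. This is an observation about the numbers in this log, not a definition of the cost function.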
2024-11-09T14:16:00,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1376195385=0, srv187315523=1, srv890656663=2} racks are {rack=0} 2024-11-09T14:16:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:00,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:00,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=20, number of hosts=3, number of racks=1 2024-11-09T14:16:00,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:00,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42216593343109815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=96000 2024-11-09T14:16:01,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 228 ms to try 96000 different iterations. Found a solution that moves 13 regions; Going from a computed imbalance of 0.42216593343109815 to a new imbalance of 0.003942807625649913. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.325); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:01,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv200084388=3, srv1310661562=2, srv1177516580=0, srv1310505924=1} racks are {rack=0} 2024-11-09T14:16:01,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=4, number of racks=1 2024-11-09T14:16:01,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25526148491585815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-09T14:16:01,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 29 ms to try 19200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25526148491585815 to a new imbalance of 0.0020219526285384167. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
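Note: the StochasticLoadBalancer(385) records run the balancer even when the weighted average imbalance is under the threshold(1.0), because the cluster has idle server(s), as the message says. For example, the 4-server run that starts at 14:16:01,100 above proceeds from an initial weighted average of only 0.25526148491585815 (RegionCountSkew 0.29457175359290033 x 500 / 577), while otherwise similar clusters without an idle server are skipped by the StochasticLoadBalancer(421) records.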
2024-11-09T14:16:01,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1177417651=0, srv358186175=2, srv983008824=3, srv1272432975=1} racks are {rack=0} 2024-11-09T14:16:01,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=4, number of racks=1 2024-11-09T14:16:01,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=12800 2024-11-09T14:16:01,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 19 ms to try 12800 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.009098786828422877. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.75); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1401503710=1, srv1847325478=2, srv1269244838=0, srv361181007=3} racks are {rack=0} 2024-11-09T14:16:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=4, number of racks=1 2024-11-09T14:16:01,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=16000 2024-11-09T14:16:01,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 24 ms to try 16000 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.007279029462738302. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
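The post-plan figure can be cross-checked the same way: after the plan above only MoveCostFunction is non-zero (multiplier 7.0, imbalance 0.6), so the weighted average is 7.0 x 0.6 / 577 = 4.2 / 577 ~ 0.0072790295, matching the logged "new imbalance of 0.007279029462738302".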
2024-11-09T14:16:01,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1057054073=0, srv1283767805=1, srv1397564176=2, srv2133814252=3} racks are {rack=0} 2024-11-09T14:16:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-09T14:16:01,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608542, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=38400 2024-11-09T14:16:01,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 57 ms to try 38400 different iterations. Found a solution that moves 6 regions; Going from a computed imbalance of 0.5003035261608542 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:01,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv917710282=3, srv59790518=1, srv1357495193=0, srv788148582=2} racks are {rack=0} 2024-11-09T14:16:01,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-09T14:16:01,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6127441778046339, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=25600 2024-11-09T14:16:01,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 37 ms to try 25600 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6127441778046339 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:01,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1805387014=1, srv441437388=2, srv876126149=3, srv1066788452=0} racks are {rack=0} 2024-11-09T14:16:01,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-09T14:16:01,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6852343510309111, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-09T14:16:01,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 34 ms to try 22400 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6852343510309111 to a new imbalance of 0.006932409012131715. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5714285714285714); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:01,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760416138=1, srv775015504=3, srv362667579=2, srv1427941035=0} racks are {rack=0} 2024-11-09T14:16:01,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-09T14:16:01,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-09T14:16:01,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 32 ms to try 19200 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
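Two regularities in the numbers above are worth noting, although neither is stated explicitly in the log. First, each post-plan MoveCostFunction imbalance matches movedRegions / regionCount for these runs (for the record above, 4 moves with imbalance 0.6666... implies a 6-region cluster). Second, computedMaxSteps is consistent with 800 candidate steps per region per server: 6 regions x 4 servers x 800 = 19200, as logged above, and the same relationship holds for the other runs in this section (e.g. 12 regions x 4 x 800 = 38400 and 10 regions x 10 x 800 = 80000).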
2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1631169942=0, srv277756321=1, srv733996099=2, srv966323751=3} racks are {rack=0} 2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:01,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0962834585018294 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.11111111111111113); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
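The two outcomes seen in this section ("Running balancer because cluster has idle server(s)." versus "skipping load balancing because weighted average imbalance=... <= threshold(1.0)") follow the decision described in the skip message itself: an idle server forces a balancing pass, otherwise the weighted average imbalance has to exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this test). A minimal sketch of that decision, assuming this is the only logic involved:

  // Sketch only; the real StochasticLoadBalancer has more preconditions, but
  // these are the two outcomes visible in the log records above.
  static boolean shouldRunBalancer(boolean clusterHasIdleServer,
                                   double weightedAverageImbalance,
                                   double minCostNeedBalance) {
    if (clusterHasIdleServer) {
      // "Running balancer because cluster has idle server(s)."
      return true;
    }
    // Otherwise: "skipping load balancing because weighted average imbalance=... <= threshold(...)"
    return weightedAverageImbalance > minCostNeedBalance;
  }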
2024-11-09T14:16:01,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv459133609=3, srv1489681528=0, srv1674865512=1, srv2065952651=2} racks are {rack=0} 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=4, number of racks=1 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.17331022530329285 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.19999999999999996); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
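If more aggressive balancing is wanted, the skip message above names the knob to change: hbase.master.balancer.stochastic.minCostNeedBalance. A hypothetical sketch of lowering it programmatically via the Hadoop Configuration API (in a deployment this would normally be set in hbase-site.xml; the value 0.05 is only illustrative):

  import org.apache.hadoop.conf.Configuration;

  public class LowerBalancerThreshold {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Property key taken from the log message above; 0.05 is an arbitrary
      // example value, not a recommendation.
      conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
      System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
  }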
2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv963885910=2, srv1133747423=0, srv985051943=3, srv1915001922=1} racks are {rack=0} 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-09T14:16:01,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-09T14:16:01,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 35 ms to try 22400 different iterations. Found a solution that moves 5 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008665511265164644. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.7142857142857143); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1529997007=2, srv1277530914=0, srv1350025300=1, srv277866429=4, srv1642118670=3} racks are {rack=0} 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:01,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:01,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=5, number of racks=1 2024-11-09T14:16:01,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:01,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.22705408170595567 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.26202041028867284); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:01,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1702177338=1, srv1873623140=2, srv1930424842=3, srv1239168140=0, srv247880153=4, srv838891306=5} racks are {rack=0} 2024-11-09T14:16:01,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:01,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:01,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:01,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:01,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:01,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:01,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:01,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-09T14:16:01,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:01,444 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:14448000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-09T14:16:01,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.38476461962415054, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-09T14:16:14,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 12721 ms to try 1000000 different iterations. Found a solution that moves 1019 regions; Going from a computed imbalance of 0.38476461962415054 to a new imbalance of 0.004107049292652452. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.33853820598006645); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:14,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
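The WARN above documents the cap that produced computedMaxSteps=1000000 for this run: the stochastic walk tries at most maxSteps iterations even when the step count calculated from the cluster size (14448000 here) is larger, unless hbase.master.balancer.stochastic.runMaxSteps is set to true. A minimal sketch of that cap, assuming no other factors are involved:

  // Sketch of the cap behaviour described by the WARN message above.
  static long effectiveMaxSteps(long calculatedMaxSteps, long maxSteps, boolean runMaxSteps) {
    if (runMaxSteps) {
      // With hbase.master.balancer.stochastic.runMaxSteps=true the full
      // calculated step count (14,448,000 in the run above) would be used.
      return calculatedMaxSteps;
    }
    // Otherwise the walk is capped, here at maxSteps=1,000,000 (computedMaxSteps=1000000).
    return Math.min(calculatedMaxSteps, maxSteps);
  }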
2024-11-09T14:16:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1822058838=3, srv1858549952=4, srv1484598409=0, srv1640107874=1, srv646401624=5, srv1665217045=2} racks are {rack=0} 2024-11-09T14:16:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-09T14:16:14,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:14,211 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:16800000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-09T14:16:14,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2979275647131677, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-09T14:16:27,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 13210 ms to try 1000000 different iterations. Found a solution that moves 919 regions; Going from a computed imbalance of 0.2979275647131677 to a new imbalance of 0.0031854419410745237. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.26257142857142857); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:27,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:27,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1355733659=2, srv1827905742=10, srv1038705610=0, srv1578288142=6, srv1454027543=3, srv1635414629=8, srv1766822074=9, srv669706929=12, srv1121158542=1, srv1490535723=4, srv80831983=13, srv1901700104=11, srv956904256=14, srv156539896=5, srv1592866175=7} racks are {rack=0} 2024-11-09T14:16:27,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:27,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:27,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=15, number of hosts=15, number of racks=1 2024-11-09T14:16:27,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:27,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.12507588154021357 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.14433756729740646); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1851802462=7, srv1147845874=0, srv1251526908=1, srv840083275=9, srv1531102686=4, srv762263338=8, srv1310777160=2, srv1318914365=3, srv159918336=5, srv1641430559=6} racks are {rack=0} 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on 
rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:27,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:27,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-09T14:16:27,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 116 ms to try 80000 different iterations. Found a solution that moves 9 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010918544194107453. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv425514707=6, srv1801576458=1, srv1895082703=2, srv428874242=7, srv850647556=9, srv422942310=5, srv1492671632=0, srv1914288023=3, srv2127577533=4, srv471108230=8} racks are {rack=0} 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=10, number of racks=1 2024-11-09T14:16:27,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:27,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.055531997651093117 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.06408392528936147); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1047453003=0, srv1223439252=1, srv305140289=4, srv687467348=8, srv320631075=5, srv2007475312=3, srv869851751=9, srv469703352=6, srv475069948=7, srv1460366774=2} racks are {rack=0} 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:27,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=54, number of hosts=10, number of racks=1 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:27,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164644, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=432000 2024-11-09T14:16:28,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 815 ms to try 432000 different iterations. Found a solution that moves 48 regions; Going from a computed imbalance of 0.8665511265164644 to a new imbalance of 0.01078374735220489. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8888888888888888); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1993980347=5, srv1161531554=3, srv1027200240=1, srv40983169=8, srv684640541=9, srv1403430449=4, srv243666110=7, srv1082335028=2, srv1007247767=0, srv2132086823=6} racks are {rack=0} 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:28,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:28,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:28,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=55, number of hosts=10, number of racks=1 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:28,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=440000 2024-11-09T14:16:29,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 843 ms to try 440000 different iterations. Found a solution that moves 49 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010808255868914448. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8909090909090909); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:29,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181056900=1, srv240835618=5, srv1973971962=3, srv922482394=9, srv400785094=7, srv143924438=2, srv2045007209=4, srv286717226=6, srv83802895=8, srv1025496411=0} racks are {rack=0} 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=10, number of racks=1 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:29,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=448000 2024-11-09T14:16:30,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 848 ms to try 448000 different iterations. Found a solution that moves 50 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010831889081455806. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8928571428571429); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
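Across the three completed plans above, the post-plan MoveCostFunction imbalance tracks the number of moved regions divided by a per-run total: 48/54 ≈ 0.8889, 49/55 ≈ 0.8909, and 50/56 ≈ 0.8929, matching the logged 0.8888888888888888, 0.8909090909090909, and 0.8928571428571429. The denominators 54, 55, and 56 coincide with the "Number of tables" reported for each run, so in these test scenarios the move cost scales with moves relative to the size of the simulated cluster; treat the exact denominator as an observation from these logs rather than a documented formula.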
2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv977432600=9, srv1703487879=3, srv22505276=7, srv826648992=8, srv1105441224=0, srv1407607206=1, srv2044145941=6, srv1796148035=5, srv1717605870=4, srv162552405=2} racks are {rack=0} 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=16, number of hosts=10, number of racks=1 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=128000 2024-11-09T14:16:30,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 199 ms to try 128000 different iterations. Found a solution that moves 14 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.01061525129982669. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.875); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1235420890=3, srv445656250=7, srv469168085=8, srv1067866818=1, srv1556137839=4, srv1061470676=0, srv1686265245=5, srv2102976282=6, srv977484918=9, srv1173487507=2} racks are {rack=0} 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=10, number of racks=1 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.30649131741006164 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.35369098029121115); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1532819880=1, srv893755344=9, srv2037820405=5, srv1218939148=0, srv1932995195=3, srv1937075803=4, srv674325410=8, srv582259141=7, srv551872069=6, srv1742467971=2} racks are {rack=0} 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=9, number of hosts=10, number of racks=1 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.34662045060658575 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.39999999999999997); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1841131691=4, srv1742835088=3, srv1941649813=5, srv123425097=0, srv1480480646=2, srv520544749=7, srv537111410=8, srv1407178747=1, srv607143580=9, srv1963236013=6} racks are {rack=0} 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 
2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-09T14:16:30,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.3851338340073176 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.44444444444444453); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1264443340=2, srv1855545450=4, srv333797848=7, srv1184540153=1, srv334201156=8, srv1907229134=6, srv108855706=0, srv1891370484=5, srv684159766=9, srv1558788479=3} racks are {rack=0} 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=123, number of hosts=10, number of racks=1 2024-11-09T14:16:30,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8002334382626535 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.923469387755102); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
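The repeated "skipping load balancing" entries name the relevant tuning knob directly: hbase.master.balancer.stochastic.minCostNeedBalance, compared here against the printed threshold(1.0). A minimal, hedged example of lowering it through the standard Hadoop Configuration API follows; the 0.05f value is purely illustrative, and in a real deployment the property would normally be set in hbase-site.xml on the master rather than in code:

// Illustrative only: lower the threshold named in the log message so the
// StochasticLoadBalancer acts on smaller weighted imbalances. The value
// 0.05f is an arbitrary example, not a recommendation.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MinCostNeedBalanceExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Print the value back to confirm what the balancer would read.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}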
2024-11-09T14:16:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2064072808=5, srv1101738625=0, srv1660247907=1, srv1714730759=3, srv2106288034=6, srv75621597=8, srv1672746695=2, srv559693433=7, srv2052174457=4, srv806812323=9} racks are {rack=0} 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=155, number of hosts=10, number of racks=1 2024-11-09T14:16:30,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8131812243798632 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9384111329343621); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1671434994=2, srv251329829=5, srv2009023781=4, srv483272897=7, srv1080264131=0, srv2008023165=3, srv1385315020=1, srv466396701=6} racks are {rack=0} 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.05755254949858986 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0664156421213727); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv168582116=1, srv2066938300=3, srv1309601772=0, srv414673689=6, srv1747951606=2, srv255636526=4, srv373841668=5, srv705093878=7} racks are {rack=0} 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-09T14:16:30,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.06673965003400768 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.07701755613924488); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv239125412=4, srv873846535=6, srv14993951=3, srv380512992=5, srv1081017805=0, srv1217974540=1, srv960240101=7, srv1246489974=2} racks are {rack=0} 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=130, number of hosts=8, number of racks=1 2024-11-09T14:16:30,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.28093705674099306 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.32420136347910594); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv843942771=7, srv1005964019=0, srv1050391914=1, srv1911568388=4, srv2042234951=5, srv1727992283=3, srv1066483262=2, srv603887883=6} racks are {rack=0} 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=140, number of hosts=8, number of racks=1 2024-11-09T14:16:30,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-09T14:16:30,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.07533492111851356 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.08693649897076465); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1856222104=2, srv1121299031=0, srv966727266=4, srv674577336=3, srv1499482584=1} racks are {rack=0} 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=5, number of racks=1 2024-11-09T14:16:30,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-09T14:16:30,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.21663778162911612, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000
2024-11-09T14:16:30,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 113 ms to try 80000 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.21663778162911612 to a new imbalance of 0.0024263431542461008. functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.2); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-09T14:16:30,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster.
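The completed run above dropped the weighted imbalance from 0.21663778162911612 to 0.0024263431542461008 by moving 4 regions after trying 80000 iterations; the per-function breakdown shows RegionCountSkewCostFunction (multiplier 500.0, imbalance 0.25) as the only non-zero term before the plan and MoveCostFunction (multiplier 7.0, imbalance 0.2) as the only one after. The following rough sketch ranks each function's multiplier-weighted contribution using only the numbers printed in those records; it does not reproduce HBase's internal cost normalization (the logged weighted averages are not a simple weighted mean of these imbalances), it only illustrates why the 500x multiplier makes region-count skew dominate the decision.

```python
# Multiplier/imbalance pairs copied from the "Start"/"Finished" records above.
# The weighting (multiplier * imbalance) is only an illustration of relative
# influence, not HBase's internal cost normalization.
MULTIPLIERS = {
    "RegionCountSkewCostFunction": 500.0,
    "MoveCostFunction": 7.0,
    "RackLocalityCostFunction": 15.0,
    "TableSkewCostFunction": 35.0,
    "ReadRequestCostFunction": 5.0,
    "WriteRequestCostFunction": 5.0,
    "MemStoreSizeCostFunction": 5.0,
    "StoreFileCostFunction": 5.0,
}
before = {"RegionCountSkewCostFunction": 0.25}  # all other imbalances logged as 0.0
after = {"MoveCostFunction": 0.2}               # the 4 moves show up as move cost

def weighted(imbalances):
    """Multiplier-weighted contribution of each cost function."""
    return {name: MULTIPLIERS[name] * imbalances.get(name, 0.0) for name in MULTIPLIERS}

if __name__ == "__main__":
    for label, imb in (("before plan", before), ("after plan", after)):
        contributions = weighted(imb)
        top = max(contributions, key=contributions.get)
        print(f"{label}: dominant function = {top} "
              f"(weighted contribution {contributions[top]:.1f})")
```

The DEBUG records that follow dump the cluster state for the next, much larger plan: 393 servers, each on its own host, all on a single rack.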
2024-11-09T14:16:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1023879493=1, srv2014003210=189, srv1749078047=138, srv331582505=236, srv796964175=346, srv1618345664=112, srv922243412=377, srv521530151=285, srv289897588=230, srv1104161589=15, srv723364021=334, srv1574670037=103, srv991422137=389, srv61153270=313, srv491531332=278, srv921119042=376, srv1259143979=44, srv1328545073=59, srv570110152=303, srv1851493400=158, srv904924979=373, srv1448348055=77, srv1904590039=165, srv1919923871=168, srv149587711=85, srv378690167=246, srv2009423943=188, srv1792504523=144, srv586221858=308, srv709881552=331, srv1902033316=164, srv1158445835=28, srv1549164225=94, srv1110355498=20, srv571907218=304, srv993924588=390, srv513186171=283, srv2034250481=192, srv1038710731=4, srv396050755=254, srv98532392=388, srv1366176924=67, srv1759477118=139, srv637707143=317, srv1400679162=71, srv2070785961=197, srv1579767849=107, srv402343148=256, srv1356760347=65, srv876189841=366, srv2099203728=202, srv302259251=232, srv564556479=300, srv888463058=368, srv274700165=225, srv1465070738=80, srv1150803540=26, srv443488730=264, srv162404145=113, srv550906057=295, srv512692524=282, srv1298277901=54, srv1495287116=84, srv1047837615=7, srv38019190=247, srv530125549=288, srv1567493334=99, srv1552955212=96, srv1968525750=180, srv124441366=41, srv131638365=56, srv1945138518=174, srv363579185=245, srv663015347=319, srv1630180909=114, srv25556762=216, srv272214214=223, srv567633918=302, srv357419873=243, srv348338998=240, srv1693666598=124, srv1921163605=169, srv1981669027=185, srv133705329=63, srv482527517=276, srv884948181=367, srv1724026490=134, srv1575876806=104, srv699610252=328, srv1882747197=160, srv1439726132=76, srv1834178402=153, srv890073968=369, srv818954099=350, srv848651647=354, srv534404444=291, srv1721074433=132, srv1490390202=82, srv447004634=266, srv212333077=204, srv1717948793=130, srv1349921805=64, srv679326597=323, srv454571108=268, srv292473936=231, srv230356217=212, srv866267661=361, srv382984499=249, srv554055885=297, srv796941222=345, srv384093299=250, srv1497489015=87, srv683865639=324, srv1403820749=72, srv1218563442=36, srv273760116=224, srv862148267=358, srv1834013598=152, srv468649336=274, srv281731416=227, srv1577471799=105, srv978252220=387, srv2096771710=200, srv208051006=198, srv2019842236=191, srv1721518703=133, srv1861551035=159, srv1298229790=53, srv1811918797=148, srv537733318=293, srv339214620=238, srv819618934=351, srv1236816343=40, srv875191419=365, srv1972982222=182, srv761219593=341, srv2143962731=206, srv960711350=383, srv717990192=332, srv663773216=320, srv1568021341=100, srv623830694=316, srv129661269=51, srv26076246=218, srv975420629=386, srv454821997=269, srv127444326=49, srv692883225=326, srv1950331825=177, srv1034491091=2, srv1527051171=92, srv1993351675=186, srv1104437728=16, srv1232015897=39, srv1592542534=109, srv761085081=340, srv1762473729=140, srv417289559=257, srv1713789195=128, srv803395540=348, srv849881021=355, srv902112835=370, srv1225612708=37, srv1372889451=68, srv767775401=342, srv1980466094=183, srv1607305583=110, srv1148144245=25, srv2115450246=203, srv1168784937=30, srv1890114087=161, srv617152256=315, srv994457011=392, srv183444596=154, srv1254403498=43, srv271191241=222, srv157082093=101, srv1817245163=149, srv1677628799=122, srv1259229735=45, srv1335270757=62, srv1613392073=111, srv2008814654=187, srv1324909090=58, srv1791158283=143, srv1715537003=129, srv1012011271=0, srv584420770=306, srv760116403=339, srv90926346=374, 
srv485587910=277, srv189714497=163, srv774372247=343, srv602436003=311, srv220613453=209, srv447192769=267, srv463877775=273, srv744062775=335, srv138117576=70, srv902470026=371, srv1245668209=42, srv671159260=321, srv1154724372=27, srv2051075120=194, srv1430118871=74, srv606157126=312, srv531458930=289, srv1796025667=146, srv865630926=360, srv1742180355=137, srv1496035680=86, srv1054701406=8, srv306432156=234, srv535259859=292, srv653485679=318, srv1094171240=13, srv926464630=378, srv1587401570=108, srv1457256373=79, srv256287084=217, srv513560292=284, srv1820502994=150, srv34640165=239, srv1893732183=162, srv262186066=219, srv1918123639=167, srv1981393805=184, srv223826973=210, srv351674520=242, srv1958325042=179, srv1123694010=22, srv1108530523=19, srv1064269278=10, srv459690652=270, srv1488458232=81, srv532560490=290, srv722859526=333, srv1907774302=166, srv237369137=214, srv1560237535=97, srv1579761300=106, srv867577646=362, srv1947581980=176, srv275474556=226, srv1133257533=23, srv232981947=213, srv439822728=263, srv1104967116=17, srv393512875=253, srv693932013=327, srv305817045=233, srv2054391641=196, srv1098178232=14, srv759182149=338, srv422718372=258, srv205195107=195, srv284997764=228, srv55784601=298, srv1451021499=78, srv1712098458=127, srv1512053228=89, srv445238374=265, srv262517147=220, srv1562819582=98, srv551436661=296, srv1500402377=88, srv1228993093=38, srv542868185=294, srv1183701255=32, srv1364168172=66, srv2043994773=193, srv584987854=307, srv105914675=9, srv111690560=21, srv935925302=379, srv289851191=229, srv1531343416=93, srv593174658=310, srv799185340=347, srv1670019280=120, srv967559579=384, srv611738652=314, srv857520347=356, srv1574220950=102, srv1431092809=75, srv2087543753=199, srv1259765039=46, srv381950824=248, srv566606863=301, srv1068033981=11, srv1068148843=12, srv1183961598=33, srv820869095=352, srv1296716953=52, srv586345923=309, srv1710790664=126, srv1515005505=90, srv503371494=279, srv3511831=241, srv50792484=281, srv564478605=299, srv1134777052=24, srv784343126=344, srv674712986=322, srv1329025088=60, srv438580531=262, srv1043138576=6, srv529124933=287, srv845866292=353, srv386837333=252, srv1324404037=57, srv1709801192=125, srv1841117927=156, srv1719367471=131, srv1954654124=178, srv460241413=271, srv24527116=215, srv754257079=337, srv1039160552=5, srv1931859557=173, srv1945539371=175, srv2145554296=208, srv437828528=261, srv31912087=235, srv688279014=325, srv973503387=385, srv942336684=381, srv528109671=286, srv1802538010=147, srv505509088=280, srv868044273=363, srv469995540=275, srv163527954=115, srv433323929=260, srv1741503009=136, srv1843698267=157, srv1280649981=50, srv1661125269=119, srv1411196089=73, srv1190632262=34, srv2097930614=201, srv1688518944=123, srv704712172=329, srv358447459=244, srv1739204500=135, srv949960490=382, srv1494010349=83, srv1551122086=95, srv574689030=305, srv1175684605=31, srv1925696301=170, srv262759330=221, srv1330710309=61, srv1311888635=55, srv864402881=359, srv1374545227=69, srv1036397629=3, srv462952752=272, srv751764752=336, srv1969699639=181, srv1261368092=47, srv201834647=190, srv38412895=251, srv1768096288=141, srv1525944147=91, srv861166487=357, srv1649940250=118, srv1781353262=142, srv224464472=211, srv1636376840=116, srv126869790=48, srv940257655=380, srv1928444821=172, srv396986546=255, srv809726426=349, srv213119457=205, srv904681337=372, srv994382570=391, srv1216974593=35, srv1838782133=155, srv192637991=171, srv1831604555=151, srv332622833=237, srv1108337611=18, srv425674199=259, srv1675868504=121, 
srv1795610310=145, srv2144145173=207, srv1163566113=29, srv163871159=117, srv918912590=375, srv870396473=364, srv70635145=330} racks are {rack=0} 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-09T14:16:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-09T14:16:30,401 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on 
host 92 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-09T14:16:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 
123 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 
is on host 154 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 185 is on host 185 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-09T14:16:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-09T14:16:30,403 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 
2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-09T14:16:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is 
on host 308 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 339 is on host 339 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-09T14:16:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-09T14:16:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 
2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-09T14:16:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 
2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 
2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-09T14:16:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 
2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-09T14:16:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-09T14:16:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 
2024-11-09T14:16:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-09T14:16:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-09T14:16:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-09T14:16:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-09T14:16:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-09T14:16:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=393, number of racks=1
2024-11-09T14:16:30,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999962); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-09T14:16:30,411 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:17606400 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue. (This config change does not require a service restart.)
2024-11-09T14:16:30,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164613, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999962); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000
2024-11-09T14:16:35,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 5012 ms to try 1000000 different iterations. Found a solution that moves 55 regions; Going from a computed imbalance of 0.8665511265164613 to a new imbalance of 0.011915077989601387. functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9821428571428571); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-09T14:16:35,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-09T14:16:35,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction], sum of multiplier of cost functions = 577.0 etc.
2024-11-09T14:16:35,442 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=13 (was 12)
Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=286 (was 286), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=208 (was 198) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=920 (was 1836)
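The imbalance figures in the StochasticLoadBalancer entries above can be cross-checked with a small calculation. The sketch below is illustrative Java, not HBase code; it assumes the logged weighted average imbalance is sum(multiplier_i * imbalance_i) / sum(multiplier_i) over the "needed" cost functions (sum of multipliers = 577.0 here), and that the warned calculatedMaxSteps corresponds to numRegions * stepsPerRegion * numServers (56 * 800 * 393 in this run, assuming one region per table). Both formulas are assumptions that happen to reproduce the logged numbers, not statements of HBase's implementation.

    public class BalancerCostSketch {

        // Assumed formula: weighted average imbalance =
        //   sum(multiplier_i * imbalance_i) / sum(multiplier_i)
        static double weightedImbalance(double[] multipliers, double[] imbalances) {
            double weighted = 0.0, sumMultipliers = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weighted += multipliers[i] * imbalances[i];
                sumMultipliers += multipliers[i];
            }
            return weighted / sumMultipliers;
        }

        public static void main(String[] args) {
            // Multipliers of the cost functions reported as active in this run:
            // RegionCountSkew, Move, RackLocality, TableSkew, ReadRequest,
            // WriteRequest, MemStoreSize, StoreFile -> sum = 577.0.
            // The "(not needed)" functions contribute nothing and are omitted.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};

            // Before balancing: only RegionCountSkewCostFunction is imbalanced.
            double[] before = {0.9999999999999962, 0, 0, 0, 0, 0, 0, 0};
            // After balancing: only MoveCostFunction is non-zero (55 of 56 regions moved).
            double[] after = {0, 0.9821428571428571, 0, 0, 0, 0, 0, 0};

            System.out.println(weightedImbalance(multipliers, before)); // ~0.8665511265164613
            System.out.println(weightedImbalance(multipliers, after));  // ~0.011915077989601387

            // Assumed origin of calculatedMaxSteps in the WARN above:
            // 56 regions * 800 stepsPerRegion * 393 servers = 17606400.
            System.out.println(56L * 800L * 393L);
        }
    }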